body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
c4be5133985df4c644f1b32b1878aa6ede747c408f0a69a27817fd33c85e5faf | def test_and_empty(self):
'Intersection test - with explict zeros'
a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])
b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])
ab_ref = Fiber([9], [(10, 11)])
ab = (a & b)
self.assertEqual(ab, ab_ref) | Intersection test - with explict zeros | test/test_fiber.py | test_and_empty | Fibertree-Project/fibertree | 2 | python | def test_and_empty(self):
a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])
b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])
ab_ref = Fiber([9], [(10, 11)])
ab = (a & b)
self.assertEqual(ab, ab_ref) | def test_and_empty(self):
a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])
b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])
ab_ref = Fiber([9], [(10, 11)])
ab = (a & b)
self.assertEqual(ab, ab_ref)<|docstring|>Intersection test - with explict zeros<|endoftext|> |
a0aa308ae01d838641f3f6ae3c128c7916fdb9b5e252d00387c4d28474d25202 | def test_or(self):
'Union test'
a = Fiber([1, 5, 8, 9], [2, 6, 9, 10])
b = Fiber([0, 5, 9], [2, 7, 11])
ab_ref = Fiber([0, 1, 5, 8, 9], [('B', 0, 2), ('A', 2, 0), ('AB', 6, 7), ('A', 9, 0), ('AB', 10, 11)])
ab = (a | b)
self.assertEqual(ab, ab_ref) | Union test | test/test_fiber.py | test_or | Fibertree-Project/fibertree | 2 | python | def test_or(self):
a = Fiber([1, 5, 8, 9], [2, 6, 9, 10])
b = Fiber([0, 5, 9], [2, 7, 11])
ab_ref = Fiber([0, 1, 5, 8, 9], [('B', 0, 2), ('A', 2, 0), ('AB', 6, 7), ('A', 9, 0), ('AB', 10, 11)])
ab = (a | b)
self.assertEqual(ab, ab_ref) | def test_or(self):
a = Fiber([1, 5, 8, 9], [2, 6, 9, 10])
b = Fiber([0, 5, 9], [2, 7, 11])
ab_ref = Fiber([0, 1, 5, 8, 9], [('B', 0, 2), ('A', 2, 0), ('AB', 6, 7), ('A', 9, 0), ('AB', 10, 11)])
ab = (a | b)
self.assertEqual(ab, ab_ref)<|docstring|>Union test<|endoftext|> |
42047e75f8ce2d49d876d937417b1d452967265d7488368dd8d01eeea20757a0 | def test_or_empty(self):
'Uniontest - with explict zeros'
a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])
b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])
ab_ref = Fiber([1, 5, 9], [('B', 0, 2), ('A', 6, 0), ('AB', 10, 11)])
ab = (a | b)
self.assertEqual(ab, ab_ref) | Uniontest - with explict zeros | test/test_fiber.py | test_or_empty | Fibertree-Project/fibertree | 2 | python | def test_or_empty(self):
a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])
b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])
ab_ref = Fiber([1, 5, 9], [('B', 0, 2), ('A', 6, 0), ('AB', 10, 11)])
ab = (a | b)
self.assertEqual(ab, ab_ref) | def test_or_empty(self):
a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])
b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])
ab_ref = Fiber([1, 5, 9], [('B', 0, 2), ('A', 6, 0), ('AB', 10, 11)])
ab = (a | b)
self.assertEqual(ab, ab_ref)<|docstring|>Uniontest - with explict zeros<|endoftext|> |
2824864c442f96131dcb56b2650f1a9dca96ee3d1a007b367ef0cb5a9876a528 | def test_or_2d(self):
'Union test 2d'
a1 = [[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]]
a2 = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
a3 = [[2, 3, 4, 5], [0, 0, 0, 0], [1, 0, 3, 4], [1, 2, 0, 4]]
b1 = a2
b2 = a1
b3 = a3
au = [a1, a2, a3]
bu = [b1, b2, b3]
a = Fiber.fromUncompressed(au)
b = Fiber.fromUncompressed(bu)
x = (a | b)
ab_ref = ['A', 'B', 'AB']
a1_fiber = Fiber.fromUncompressed(a1)
a2_fiber = Fiber([], [])
a3_fiber = Fiber.fromUncompressed(a3)
ab_a_ref = [a1_fiber, a2_fiber, a3_fiber]
ab_b_ref = [a2_fiber, a1_fiber, a3_fiber]
for (n, (c, (ab, ab_a, ab_b))) in enumerate(x):
self.assertEqual(ab, ab_ref[n])
self.assertEqual(ab_a, ab_a_ref[n])
self.assertEqual(ab_b, ab_b_ref[n]) | Union test 2d | test/test_fiber.py | test_or_2d | Fibertree-Project/fibertree | 2 | python | def test_or_2d(self):
a1 = [[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]]
a2 = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
a3 = [[2, 3, 4, 5], [0, 0, 0, 0], [1, 0, 3, 4], [1, 2, 0, 4]]
b1 = a2
b2 = a1
b3 = a3
au = [a1, a2, a3]
bu = [b1, b2, b3]
a = Fiber.fromUncompressed(au)
b = Fiber.fromUncompressed(bu)
x = (a | b)
ab_ref = ['A', 'B', 'AB']
a1_fiber = Fiber.fromUncompressed(a1)
a2_fiber = Fiber([], [])
a3_fiber = Fiber.fromUncompressed(a3)
ab_a_ref = [a1_fiber, a2_fiber, a3_fiber]
ab_b_ref = [a2_fiber, a1_fiber, a3_fiber]
for (n, (c, (ab, ab_a, ab_b))) in enumerate(x):
self.assertEqual(ab, ab_ref[n])
self.assertEqual(ab_a, ab_a_ref[n])
self.assertEqual(ab_b, ab_b_ref[n]) | def test_or_2d(self):
a1 = [[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]]
a2 = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
a3 = [[2, 3, 4, 5], [0, 0, 0, 0], [1, 0, 3, 4], [1, 2, 0, 4]]
b1 = a2
b2 = a1
b3 = a3
au = [a1, a2, a3]
bu = [b1, b2, b3]
a = Fiber.fromUncompressed(au)
b = Fiber.fromUncompressed(bu)
x = (a | b)
ab_ref = ['A', 'B', 'AB']
a1_fiber = Fiber.fromUncompressed(a1)
a2_fiber = Fiber([], [])
a3_fiber = Fiber.fromUncompressed(a3)
ab_a_ref = [a1_fiber, a2_fiber, a3_fiber]
ab_b_ref = [a2_fiber, a1_fiber, a3_fiber]
for (n, (c, (ab, ab_a, ab_b))) in enumerate(x):
self.assertEqual(ab, ab_ref[n])
self.assertEqual(ab_a, ab_a_ref[n])
self.assertEqual(ab_b, ab_b_ref[n])<|docstring|>Union test 2d<|endoftext|> |
34ebccce915b4d4d3a7dd67d3b7752c9e2931212a3a1938604ce7c272fbb53c2 | def test_xor(self):
'Xor test'
a = Fiber([1, 5, 8, 9], [2, 6, 9, 10])
b = Fiber([0, 5, 9], [2, 7, 11])
ab_ref = Fiber([0, 1, 8], [('B', 0, 2), ('A', 2, 0), ('A', 9, 0)])
ab = (a ^ b)
self.assertEqual(ab, ab_ref) | Xor test | test/test_fiber.py | test_xor | Fibertree-Project/fibertree | 2 | python | def test_xor(self):
a = Fiber([1, 5, 8, 9], [2, 6, 9, 10])
b = Fiber([0, 5, 9], [2, 7, 11])
ab_ref = Fiber([0, 1, 8], [('B', 0, 2), ('A', 2, 0), ('A', 9, 0)])
ab = (a ^ b)
self.assertEqual(ab, ab_ref) | def test_xor(self):
a = Fiber([1, 5, 8, 9], [2, 6, 9, 10])
b = Fiber([0, 5, 9], [2, 7, 11])
ab_ref = Fiber([0, 1, 8], [('B', 0, 2), ('A', 2, 0), ('A', 9, 0)])
ab = (a ^ b)
self.assertEqual(ab, ab_ref)<|docstring|>Xor test<|endoftext|> |
29cd07a33be19991132a232729e91eb787b6e55763739c31cebe23d903e76d2a | def test_xor_2d(self):
'Union test 2d'
a1 = [[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]]
a2 = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
a3 = [[2, 3, 4, 5], [0, 0, 0, 0], [1, 0, 3, 4], [1, 2, 0, 4]]
b1 = a2
b2 = a1
b3 = a3
au = [a1, a2, a3]
bu = [b1, b2, b3]
abu_ref = [a1, b2, []]
a = Fiber.fromUncompressed(au)
b = Fiber.fromUncompressed(bu)
x = (a ^ b)
ab_ref = ['A', 'B']
a1_fiber = Fiber.fromUncompressed(a1)
a2_fiber = Fiber([], [])
ab_a_ref = [a1_fiber, a2_fiber]
ab_b_ref = [a2_fiber, a1_fiber]
for (n, (c, (ab, ab_a, ab_b))) in enumerate(x):
self.assertEqual(ab, ab_ref[n])
self.assertEqual(ab_a, ab_a_ref[n])
self.assertEqual(ab_b, ab_b_ref[n]) | Union test 2d | test/test_fiber.py | test_xor_2d | Fibertree-Project/fibertree | 2 | python | def test_xor_2d(self):
a1 = [[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]]
a2 = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
a3 = [[2, 3, 4, 5], [0, 0, 0, 0], [1, 0, 3, 4], [1, 2, 0, 4]]
b1 = a2
b2 = a1
b3 = a3
au = [a1, a2, a3]
bu = [b1, b2, b3]
abu_ref = [a1, b2, []]
a = Fiber.fromUncompressed(au)
b = Fiber.fromUncompressed(bu)
x = (a ^ b)
ab_ref = ['A', 'B']
a1_fiber = Fiber.fromUncompressed(a1)
a2_fiber = Fiber([], [])
ab_a_ref = [a1_fiber, a2_fiber]
ab_b_ref = [a2_fiber, a1_fiber]
for (n, (c, (ab, ab_a, ab_b))) in enumerate(x):
self.assertEqual(ab, ab_ref[n])
self.assertEqual(ab_a, ab_a_ref[n])
self.assertEqual(ab_b, ab_b_ref[n]) | def test_xor_2d(self):
a1 = [[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]]
a2 = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
a3 = [[2, 3, 4, 5], [0, 0, 0, 0], [1, 0, 3, 4], [1, 2, 0, 4]]
b1 = a2
b2 = a1
b3 = a3
au = [a1, a2, a3]
bu = [b1, b2, b3]
abu_ref = [a1, b2, []]
a = Fiber.fromUncompressed(au)
b = Fiber.fromUncompressed(bu)
x = (a ^ b)
ab_ref = ['A', 'B']
a1_fiber = Fiber.fromUncompressed(a1)
a2_fiber = Fiber([], [])
ab_a_ref = [a1_fiber, a2_fiber]
ab_b_ref = [a2_fiber, a1_fiber]
for (n, (c, (ab, ab_a, ab_b))) in enumerate(x):
self.assertEqual(ab, ab_ref[n])
self.assertEqual(ab_a, ab_a_ref[n])
self.assertEqual(ab_b, ab_b_ref[n])<|docstring|>Union test 2d<|endoftext|> |
1ce3fa826248473eaee949991472ed76b3e415344db676bc94f85a5e03f625ed | def test_xor_empty(self):
'Uniontest - with explict zeros'
a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])
b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])
ab_ref = Fiber([1, 5], [('B', 0, 2), ('A', 6, 0)])
ab = (a ^ b)
self.assertEqual(ab, ab_ref) | Uniontest - with explict zeros | test/test_fiber.py | test_xor_empty | Fibertree-Project/fibertree | 2 | python | def test_xor_empty(self):
a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])
b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])
ab_ref = Fiber([1, 5], [('B', 0, 2), ('A', 6, 0)])
ab = (a ^ b)
self.assertEqual(ab, ab_ref) | def test_xor_empty(self):
a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])
b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])
ab_ref = Fiber([1, 5], [('B', 0, 2), ('A', 6, 0)])
ab = (a ^ b)
self.assertEqual(ab, ab_ref)<|docstring|>Uniontest - with explict zeros<|endoftext|> |
5d8ff98d404a7477c5ed889059896605bfd2dd57524ac5d4ec082992af78b07e | def test_diff(self):
'Difference test'
a = Fiber([1, 5, 8, 9, 12, 14], [2, 6, 9, 10, 0, 0])
b = Fiber([0, 5, 9, 12], [2, 7, 0, 5])
ab_ref = Fiber([1, 8, 9, 14], [2, 9, 10, 0])
ab = (a - b)
self.assertEqual(ab, ab_ref) | Difference test | test/test_fiber.py | test_diff | Fibertree-Project/fibertree | 2 | python | def test_diff(self):
a = Fiber([1, 5, 8, 9, 12, 14], [2, 6, 9, 10, 0, 0])
b = Fiber([0, 5, 9, 12], [2, 7, 0, 5])
ab_ref = Fiber([1, 8, 9, 14], [2, 9, 10, 0])
ab = (a - b)
self.assertEqual(ab, ab_ref) | def test_diff(self):
a = Fiber([1, 5, 8, 9, 12, 14], [2, 6, 9, 10, 0, 0])
b = Fiber([0, 5, 9, 12], [2, 7, 0, 5])
ab_ref = Fiber([1, 8, 9, 14], [2, 9, 10, 0])
ab = (a - b)
self.assertEqual(ab, ab_ref)<|docstring|>Difference test<|endoftext|> |
e0cf1a9d7b35a5b0774439f128d8e88ff79ae9ff422ca776fd7a72fead0f0347 | def test_assignment(self):
'Assignment test'
a = Fiber([0, 5, 9], [0, 10, 0])
b = Fiber([1, 5, 8, 9, 14], [2, 6, 9, 10, 0])
ab_ref = Fiber([1, 5, 8, 9], [(0, 2), (10, 6), (0, 9), (0, 10)])
ab = (a << b)
self.assertEqual(ab, ab_ref) | Assignment test | test/test_fiber.py | test_assignment | Fibertree-Project/fibertree | 2 | python | def test_assignment(self):
a = Fiber([0, 5, 9], [0, 10, 0])
b = Fiber([1, 5, 8, 9, 14], [2, 6, 9, 10, 0])
ab_ref = Fiber([1, 5, 8, 9], [(0, 2), (10, 6), (0, 9), (0, 10)])
ab = (a << b)
self.assertEqual(ab, ab_ref) | def test_assignment(self):
a = Fiber([0, 5, 9], [0, 10, 0])
b = Fiber([1, 5, 8, 9, 14], [2, 6, 9, 10, 0])
ab_ref = Fiber([1, 5, 8, 9], [(0, 2), (10, 6), (0, 9), (0, 10)])
ab = (a << b)
self.assertEqual(ab, ab_ref)<|docstring|>Assignment test<|endoftext|> |
e4094e13778fc8bff2391ebb20a18d381732ff1958d7b5c386885749b6385a49 | def test_flatten(self):
'Test flattening/unflattening 1 level'
u_t = [[[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 2, 3, 0], [1, 0, 3, 4], [0, 0, 0, 0], [1, 2, 0, 4]]]
f = Fiber.fromUncompressed(u_t)
ff = f.flattenRanks()
ff_ref = Fiber([(0, 0), (0, 1), (0, 2), (0, 3), (2, 0), (2, 1), (2, 3)], [Fiber([0, 1, 2], [1, 2, 3]), Fiber([0, 2, 3], [1, 3, 4]), Fiber([1, 2, 3], [2, 3, 4]), Fiber([0, 1, 3], [1, 2, 4]), Fiber([0, 1, 2], [1, 2, 3]), Fiber([0, 2, 3], [1, 3, 4]), Fiber([0, 1, 3], [1, 2, 4])])
self.assertEqual(ff, ff_ref)
fu = ff.unflattenRanks()
self.assertEqual(fu, f) | Test flattening/unflattening 1 level | test/test_fiber.py | test_flatten | Fibertree-Project/fibertree | 2 | python | def test_flatten(self):
u_t = [[[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 2, 3, 0], [1, 0, 3, 4], [0, 0, 0, 0], [1, 2, 0, 4]]]
f = Fiber.fromUncompressed(u_t)
ff = f.flattenRanks()
ff_ref = Fiber([(0, 0), (0, 1), (0, 2), (0, 3), (2, 0), (2, 1), (2, 3)], [Fiber([0, 1, 2], [1, 2, 3]), Fiber([0, 2, 3], [1, 3, 4]), Fiber([1, 2, 3], [2, 3, 4]), Fiber([0, 1, 3], [1, 2, 4]), Fiber([0, 1, 2], [1, 2, 3]), Fiber([0, 2, 3], [1, 3, 4]), Fiber([0, 1, 3], [1, 2, 4])])
self.assertEqual(ff, ff_ref)
fu = ff.unflattenRanks()
self.assertEqual(fu, f) | def test_flatten(self):
u_t = [[[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 2, 3, 0], [1, 0, 3, 4], [0, 0, 0, 0], [1, 2, 0, 4]]]
f = Fiber.fromUncompressed(u_t)
ff = f.flattenRanks()
ff_ref = Fiber([(0, 0), (0, 1), (0, 2), (0, 3), (2, 0), (2, 1), (2, 3)], [Fiber([0, 1, 2], [1, 2, 3]), Fiber([0, 2, 3], [1, 3, 4]), Fiber([1, 2, 3], [2, 3, 4]), Fiber([0, 1, 3], [1, 2, 4]), Fiber([0, 1, 2], [1, 2, 3]), Fiber([0, 2, 3], [1, 3, 4]), Fiber([0, 1, 3], [1, 2, 4])])
self.assertEqual(ff, ff_ref)
fu = ff.unflattenRanks()
self.assertEqual(fu, f)<|docstring|>Test flattening/unflattening 1 level<|endoftext|> |
39b69cb89c97606a055a57b3dbb2f1169106bcf44c32f9eee9f2202ffcc35807 | def test_flatten_levels_2(self):
'Test flattening/unflattening 2 levels'
u_t = [[[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 2, 3, 0], [1, 0, 3, 4], [0, 0, 0, 0], [1, 2, 0, 4]]]
f = Fiber.fromUncompressed(u_t)
ff = f.flattenRanks(levels=2)
ref_coords = [(0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 1, 0), (0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 2), (0, 2, 3), (0, 3, 0), (0, 3, 1), (0, 3, 3), (2, 0, 0), (2, 0, 1), (2, 0, 2), (2, 1, 0), (2, 1, 2), (2, 1, 3), (2, 3, 0), (2, 3, 1), (2, 3, 3)]
ref_payloads = [1, 2, 3, 1, 3, 4, 2, 3, 4, 1, 2, 4, 1, 2, 3, 1, 3, 4, 1, 2, 4]
ff_ref = Fiber(coords=ref_coords, payloads=ref_payloads)
self.assertEqual(ff, ff_ref)
fu = ff.unflattenRanks(levels=2)
self.assertEqual(fu, f)
fu1 = ff.unflattenRanks(levels=1)
fu1.updatePayloads((lambda p: p.unflattenRanks(levels=1)))
self.assertEqual(fu1, f) | Test flattening/unflattening 2 levels | test/test_fiber.py | test_flatten_levels_2 | Fibertree-Project/fibertree | 2 | python | def test_flatten_levels_2(self):
u_t = [[[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 2, 3, 0], [1, 0, 3, 4], [0, 0, 0, 0], [1, 2, 0, 4]]]
f = Fiber.fromUncompressed(u_t)
ff = f.flattenRanks(levels=2)
ref_coords = [(0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 1, 0), (0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 2), (0, 2, 3), (0, 3, 0), (0, 3, 1), (0, 3, 3), (2, 0, 0), (2, 0, 1), (2, 0, 2), (2, 1, 0), (2, 1, 2), (2, 1, 3), (2, 3, 0), (2, 3, 1), (2, 3, 3)]
ref_payloads = [1, 2, 3, 1, 3, 4, 2, 3, 4, 1, 2, 4, 1, 2, 3, 1, 3, 4, 1, 2, 4]
ff_ref = Fiber(coords=ref_coords, payloads=ref_payloads)
self.assertEqual(ff, ff_ref)
fu = ff.unflattenRanks(levels=2)
self.assertEqual(fu, f)
fu1 = ff.unflattenRanks(levels=1)
fu1.updatePayloads((lambda p: p.unflattenRanks(levels=1)))
self.assertEqual(fu1, f) | def test_flatten_levels_2(self):
u_t = [[[1, 2, 3, 0], [1, 0, 3, 4], [0, 2, 3, 4], [1, 2, 0, 4]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 2, 3, 0], [1, 0, 3, 4], [0, 0, 0, 0], [1, 2, 0, 4]]]
f = Fiber.fromUncompressed(u_t)
ff = f.flattenRanks(levels=2)
ref_coords = [(0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 1, 0), (0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 2), (0, 2, 3), (0, 3, 0), (0, 3, 1), (0, 3, 3), (2, 0, 0), (2, 0, 1), (2, 0, 2), (2, 1, 0), (2, 1, 2), (2, 1, 3), (2, 3, 0), (2, 3, 1), (2, 3, 3)]
ref_payloads = [1, 2, 3, 1, 3, 4, 2, 3, 4, 1, 2, 4, 1, 2, 3, 1, 3, 4, 1, 2, 4]
ff_ref = Fiber(coords=ref_coords, payloads=ref_payloads)
self.assertEqual(ff, ff_ref)
fu = ff.unflattenRanks(levels=2)
self.assertEqual(fu, f)
fu1 = ff.unflattenRanks(levels=1)
fu1.updatePayloads((lambda p: p.unflattenRanks(levels=1)))
self.assertEqual(fu1, f)<|docstring|>Test flattening/unflattening 2 levels<|endoftext|> |
ff3a0e25c85e3cb834186a5e2c894959bd4fd0e6e2c91de5969d3819623dc563 | def test_flatten_levels_3(self):
'Test flattening/unflattening 3 levels'
pass | Test flattening/unflattening 3 levels | test/test_fiber.py | test_flatten_levels_3 | Fibertree-Project/fibertree | 2 | python | def test_flatten_levels_3(self):
pass | def test_flatten_levels_3(self):
pass<|docstring|>Test flattening/unflattening 3 levels<|endoftext|> |
071056f7fee964a5bf2be452656bf2f36d24622e41fbf16d4fcfca267bf0ec48 | @cuda.jit(device=True, inline=True)
def logp(denom: torch.Tensor, acts: torch.Tensor, maxT: int, maxU: int, alphabet_size: int, mb: int, t: int, u: int, v: int):
'\n Compute the sum of log probability from the activation tensor and its denominator.\n\n Args:\n denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor\n across entire vocabulary.\n acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.\n maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.\n maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.\n alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).\n mb: Batch indexer.\n t: Acoustic sequence timestep indexer.\n u: Target sequence timestep indexer.\n v: Vocabulary token indexer.\n\n Returns:\n The sum of logprobs[mb, t, u, v] + denom[mb, t, u]\n '
col = ((((mb * maxT) + t) * maxU) + u)
return (denom[col] + acts[((col * alphabet_size) + v)]) | Compute the sum of log probability from the activation tensor and its denominator.
Args:
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
mb: Batch indexer.
t: Acoustic sequence timestep indexer.
u: Target sequence timestep indexer.
v: Vocabulary token indexer.
Returns:
The sum of logprobs[mb, t, u, v] + denom[mb, t, u] | warprnnt_numba/rnnt_loss/utils/cuda_utils/gpu_rnnt_kernel.py | logp | titu1994/warprnnt_numba | 7 | python | @cuda.jit(device=True, inline=True)
def logp(denom: torch.Tensor, acts: torch.Tensor, maxT: int, maxU: int, alphabet_size: int, mb: int, t: int, u: int, v: int):
'\n Compute the sum of log probability from the activation tensor and its denominator.\n\n Args:\n denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor\n across entire vocabulary.\n acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.\n maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.\n maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.\n alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).\n mb: Batch indexer.\n t: Acoustic sequence timestep indexer.\n u: Target sequence timestep indexer.\n v: Vocabulary token indexer.\n\n Returns:\n The sum of logprobs[mb, t, u, v] + denom[mb, t, u]\n '
col = ((((mb * maxT) + t) * maxU) + u)
return (denom[col] + acts[((col * alphabet_size) + v)]) | @cuda.jit(device=True, inline=True)
def logp(denom: torch.Tensor, acts: torch.Tensor, maxT: int, maxU: int, alphabet_size: int, mb: int, t: int, u: int, v: int):
'\n Compute the sum of log probability from the activation tensor and its denominator.\n\n Args:\n denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor\n across entire vocabulary.\n acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.\n maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.\n maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.\n alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).\n mb: Batch indexer.\n t: Acoustic sequence timestep indexer.\n u: Target sequence timestep indexer.\n v: Vocabulary token indexer.\n\n Returns:\n The sum of logprobs[mb, t, u, v] + denom[mb, t, u]\n '
col = ((((mb * maxT) + t) * maxU) + u)
return (denom[col] + acts[((col * alphabet_size) + v)])<|docstring|>Compute the sum of log probability from the activation tensor and its denominator.
Args:
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
mb: Batch indexer.
t: Acoustic sequence timestep indexer.
u: Target sequence timestep indexer.
v: Vocabulary token indexer.
Returns:
The sum of logprobs[mb, t, u, v] + denom[mb, t, u]<|endoftext|> |
7e2544bc2644d8bcd22171103041a36c75531c33a081c1a66fcfc7cc428ade74 | @cuda.jit()
def compute_alphas_kernel(acts: torch.Tensor, denom: torch.Tensor, alphas: torch.Tensor, llForward: torch.Tensor, xlen: torch.Tensor, ylen: torch.Tensor, mlabels: torch.Tensor, minibatch: int, maxT: int, maxU: int, alphabet_size: int, blank_: int):
'\n Compute alpha (forward variable) probabilities over the transduction step.\n\n Args:\n acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.\n denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor\n across entire vocabulary.\n alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable\n probabilities.\n llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.\n Returned as the forward pass loss that is reduced by the optimizer.\n xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded\n activation tensor.\n ylen: Vector of length B which contains the actual target sequence lengths in the padded\n activation tensor.\n mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).\n The matrix contains the padded target transcription that must be predicted.\n minibatch: Int representing the batch size.\n maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.\n maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.\n alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).\n blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.\n\n Updates:\n Kernel inplace updates the following inputs:\n - alphas: forward variable scores.\n - llForward: log-likelihood of forward variable.\n '
b = cuda.blockIdx.x
u = cuda.threadIdx.x
T = xlen[b]
U = (ylen[b] + 1)
labels: torch.Tensor = mlabels[b]
offset = ((b * maxT) * maxU)
if (u == 0):
alphas[offset] = 0
cuda.syncthreads()
for n in range(1, ((T + U) - 1)):
t = (n - u)
if (u == 0):
if ((t > 0) and (t < T)):
alphas[((offset + (t * maxU)) + u)] = (alphas[((offset + ((t - 1) * maxU)) + u)] + logp(denom, acts, maxT, maxU, alphabet_size, b, (t - 1), 0, blank_))
elif (u < U):
if (t == 0):
alphas[(offset + u)] = (alphas[((offset + u) - 1)] + logp(denom, acts, maxT, maxU, alphabet_size, b, 0, (u - 1), labels[(u - 1)]))
elif ((t > 0) and (t < T)):
no_emit = (alphas[((offset + ((t - 1) * maxU)) + u)] + logp(denom, acts, maxT, maxU, alphabet_size, b, (t - 1), u, blank_))
emit = (alphas[(((offset + (t * maxU)) + u) - 1)] + logp(denom, acts, maxT, maxU, alphabet_size, b, t, (u - 1), labels[(u - 1)]))
alphas[((offset + (t * maxU)) + u)] = rnnt_helper.log_sum_exp(emit, no_emit)
cuda.syncthreads()
if (u == 0):
loglike = (alphas[(((offset + ((T - 1) * maxU)) + U) - 1)] + logp(denom, acts, maxT, maxU, alphabet_size, b, (T - 1), (U - 1), blank_))
llForward[b] = loglike | Compute alpha (forward variable) probabilities over the transduction step.
Args:
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable
probabilities.
llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.
Returned as the forward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
Updates:
Kernel inplace updates the following inputs:
- alphas: forward variable scores.
- llForward: log-likelihood of forward variable. | warprnnt_numba/rnnt_loss/utils/cuda_utils/gpu_rnnt_kernel.py | compute_alphas_kernel | titu1994/warprnnt_numba | 7 | python | @cuda.jit()
def compute_alphas_kernel(acts: torch.Tensor, denom: torch.Tensor, alphas: torch.Tensor, llForward: torch.Tensor, xlen: torch.Tensor, ylen: torch.Tensor, mlabels: torch.Tensor, minibatch: int, maxT: int, maxU: int, alphabet_size: int, blank_: int):
'\n Compute alpha (forward variable) probabilities over the transduction step.\n\n Args:\n acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.\n denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor\n across entire vocabulary.\n alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable\n probabilities.\n llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.\n Returned as the forward pass loss that is reduced by the optimizer.\n xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded\n activation tensor.\n ylen: Vector of length B which contains the actual target sequence lengths in the padded\n activation tensor.\n mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).\n The matrix contains the padded target transcription that must be predicted.\n minibatch: Int representing the batch size.\n maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.\n maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.\n alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).\n blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.\n\n Updates:\n Kernel inplace updates the following inputs:\n - alphas: forward variable scores.\n - llForward: log-likelihood of forward variable.\n '
b = cuda.blockIdx.x
u = cuda.threadIdx.x
T = xlen[b]
U = (ylen[b] + 1)
labels: torch.Tensor = mlabels[b]
offset = ((b * maxT) * maxU)
if (u == 0):
alphas[offset] = 0
cuda.syncthreads()
for n in range(1, ((T + U) - 1)):
t = (n - u)
if (u == 0):
if ((t > 0) and (t < T)):
alphas[((offset + (t * maxU)) + u)] = (alphas[((offset + ((t - 1) * maxU)) + u)] + logp(denom, acts, maxT, maxU, alphabet_size, b, (t - 1), 0, blank_))
elif (u < U):
if (t == 0):
alphas[(offset + u)] = (alphas[((offset + u) - 1)] + logp(denom, acts, maxT, maxU, alphabet_size, b, 0, (u - 1), labels[(u - 1)]))
elif ((t > 0) and (t < T)):
no_emit = (alphas[((offset + ((t - 1) * maxU)) + u)] + logp(denom, acts, maxT, maxU, alphabet_size, b, (t - 1), u, blank_))
emit = (alphas[(((offset + (t * maxU)) + u) - 1)] + logp(denom, acts, maxT, maxU, alphabet_size, b, t, (u - 1), labels[(u - 1)]))
alphas[((offset + (t * maxU)) + u)] = rnnt_helper.log_sum_exp(emit, no_emit)
cuda.syncthreads()
if (u == 0):
loglike = (alphas[(((offset + ((T - 1) * maxU)) + U) - 1)] + logp(denom, acts, maxT, maxU, alphabet_size, b, (T - 1), (U - 1), blank_))
llForward[b] = loglike | @cuda.jit()
def compute_alphas_kernel(acts: torch.Tensor, denom: torch.Tensor, alphas: torch.Tensor, llForward: torch.Tensor, xlen: torch.Tensor, ylen: torch.Tensor, mlabels: torch.Tensor, minibatch: int, maxT: int, maxU: int, alphabet_size: int, blank_: int):
'\n Compute alpha (forward variable) probabilities over the transduction step.\n\n Args:\n acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.\n denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor\n across entire vocabulary.\n alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable\n probabilities.\n llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.\n Returned as the forward pass loss that is reduced by the optimizer.\n xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded\n activation tensor.\n ylen: Vector of length B which contains the actual target sequence lengths in the padded\n activation tensor.\n mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).\n The matrix contains the padded target transcription that must be predicted.\n minibatch: Int representing the batch size.\n maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.\n maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.\n alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).\n blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.\n\n Updates:\n Kernel inplace updates the following inputs:\n - alphas: forward variable scores.\n - llForward: log-likelihood of forward variable.\n '
b = cuda.blockIdx.x
u = cuda.threadIdx.x
T = xlen[b]
U = (ylen[b] + 1)
labels: torch.Tensor = mlabels[b]
offset = ((b * maxT) * maxU)
if (u == 0):
alphas[offset] = 0
cuda.syncthreads()
for n in range(1, ((T + U) - 1)):
t = (n - u)
if (u == 0):
if ((t > 0) and (t < T)):
alphas[((offset + (t * maxU)) + u)] = (alphas[((offset + ((t - 1) * maxU)) + u)] + logp(denom, acts, maxT, maxU, alphabet_size, b, (t - 1), 0, blank_))
elif (u < U):
if (t == 0):
alphas[(offset + u)] = (alphas[((offset + u) - 1)] + logp(denom, acts, maxT, maxU, alphabet_size, b, 0, (u - 1), labels[(u - 1)]))
elif ((t > 0) and (t < T)):
no_emit = (alphas[((offset + ((t - 1) * maxU)) + u)] + logp(denom, acts, maxT, maxU, alphabet_size, b, (t - 1), u, blank_))
emit = (alphas[(((offset + (t * maxU)) + u) - 1)] + logp(denom, acts, maxT, maxU, alphabet_size, b, t, (u - 1), labels[(u - 1)]))
alphas[((offset + (t * maxU)) + u)] = rnnt_helper.log_sum_exp(emit, no_emit)
cuda.syncthreads()
if (u == 0):
loglike = (alphas[(((offset + ((T - 1) * maxU)) + U) - 1)] + logp(denom, acts, maxT, maxU, alphabet_size, b, (T - 1), (U - 1), blank_))
llForward[b] = loglike<|docstring|>Compute alpha (forward variable) probabilities over the transduction step.
Args:
acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
denom: Tensor of shape [B, T, U] flattened. Represents the denominator of the logprobs activation tensor
across entire vocabulary.
alphas: Zero tensor of shape [B, T, U]. Will be updated inside the kernel with the forward variable
probabilities.
llForward: Zero tensor of shape [B]. Represents the log-likelihood of the forward pass.
Returned as the forward pass loss that is reduced by the optimizer.
xlen: Vector of length B which contains the actual acoustic sequence lengths in the padded
activation tensor.
ylen: Vector of length B which contains the actual target sequence lengths in the padded
activation tensor.
mlabels: Matrix of shape [B, U+1] (+1 here is due to <SOS> token - usually the RNNT blank).
The matrix contains the padded target transcription that must be predicted.
minibatch: Int representing the batch size.
maxT: The maximum possible acoustic sequence length. Represents T in the logprobs tensor.
maxU: The maximum possible target sequence length. Represents U in the logprobs tensor.
alphabet_size: The vocabulary dimension V+1 (inclusive of RNNT blank).
blank_: Index of the RNNT blank token in the vocabulary. Generally the first or last token in the vocab.
Updates:
Kernel inplace updates the following inputs:
- alphas: forward variable scores.
- llForward: log-likelihood of forward variable.<|endoftext|> |
@cuda.jit()
def compute_betas_kernel(acts: torch.Tensor, denom: torch.Tensor, betas: torch.Tensor, llBackward: torch.Tensor, xlen: torch.Tensor, ylen: torch.Tensor, mlabels: torch.Tensor, minibatch: int, maxT: int, maxU: int, alphabet_size: int, blank_: int):
    """
    Compute beta (backward variable) probabilities over the transduction step.

    Args:
        acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
        denom: Tensor of shape [B, T, U] flattened. Denominator of the logprobs activation tensor
            across the entire vocabulary.
        betas: Zero tensor of shape [B, T, U]. Updated in place by this kernel with the backward
            variable probabilities.
        llBackward: Zero tensor of shape [B]. Log-likelihood of the backward pass, returned as the
            backward pass loss that is reduced by the optimizer.
        xlen: Vector of length B with the actual acoustic sequence lengths in the padded tensor.
        ylen: Vector of length B with the actual target sequence lengths in the padded tensor.
        mlabels: Matrix of shape [B, U+1] (+1 due to the <SOS> token - usually the RNNT blank).
            Contains the padded target transcription that must be predicted.
        minibatch: Int representing the batch size.
        maxT: Maximum possible acoustic sequence length (T in the logprobs tensor).
        maxU: Maximum possible target sequence length (U in the logprobs tensor).
        alphabet_size: The vocabulary dimension V+1 (inclusive of the RNNT blank).
        blank_: Index of the RNNT blank token in the vocabulary.

    Updates:
        Kernel inplace updates the following inputs:
        -   betas: backward variable scores.
        -   llBackward: log-likelihood of the backward variable.
    """
    # One CUDA block per batch sample; one thread per target position u.
    b = cuda.blockIdx.x
    u = cuda.threadIdx.x
    T = xlen[b]
    U = ylen[b] + 1
    labels: torch.Tensor = mlabels[b]
    # Start of this sample's [maxT, maxU] slice inside the flattened betas tensor.
    offset = b * maxT * maxU

    # Terminal cell: beta(T-1, U-1) = log P(blank | T-1, U-1).
    if u == 0:
        betas[offset + (T - 1) * maxU + U - 1] = logp(
            denom, acts, maxT, maxU, alphabet_size, b, T - 1, U - 1, blank_
        )
    cuda.syncthreads()

    # Sweep anti-diagonals n = t + u from the last cell back toward (0, 0); every
    # dependency, (t+1, u) and (t, u+1), lies on an already-processed diagonal.
    for n in range(T + U - 2, -1, -1):
        t = n - u
        if u == U - 1:
            # Last row: only the blank transition (t, U-1) -> (t+1, U-1) exists.
            if t >= 0 and t < T - 1:
                betas[offset + t * maxU + U - 1] = betas[offset + (t + 1) * maxU + U - 1] + logp(
                    denom, acts, maxT, maxU, alphabet_size, b, t, U - 1, blank_
                )
        elif u < U:
            if t == T - 1:
                # Last column: only the label emission (T-1, u) -> (T-1, u+1) exists.
                betas[offset + (T - 1) * maxU + u] = betas[offset + (T - 1) * maxU + u + 1] + logp(
                    denom, acts, maxT, maxU, alphabet_size, b, T - 1, u, labels[u]
                )
            elif t >= 0 and t < T - 1:
                # Interior cell: combine the blank path (advance t) and the
                # label-emission path (advance u) in log space.
                no_emit = betas[offset + (t + 1) * maxU + u] + logp(
                    denom, acts, maxT, maxU, alphabet_size, b, t, u, blank_
                )
                emit = betas[offset + t * maxU + u + 1] + logp(
                    denom, acts, maxT, maxU, alphabet_size, b, t, u, labels[u]
                )
                betas[offset + t * maxU + u] = rnnt_helper.log_sum_exp(emit, no_emit)
        cuda.syncthreads()

    # beta(0, 0) is the total log-likelihood of this sample.
    if u == 0:
        llBackward[b] = betas[offset]
@cuda.jit()
def compute_grad_kernel(grads: torch.Tensor, acts: torch.Tensor, denom: torch.Tensor, alphas: torch.Tensor, betas: torch.Tensor, logll: torch.Tensor, xlen: torch.Tensor, ylen: torch.Tensor, mlabels: torch.Tensor, minibatch: int, maxT: int, maxU: int, alphabet_size: int, blank_: int, fastemit_lambda: float, clamp: float):
    """
    Compute gradients over the transduction step.

    Args:
        grads: Zero Tensor of shape [B, T, U, V+1]. Updated by this kernel to contain the
            gradients of this batch of samples.
        acts: Tensor of shape [B, T, U, V+1] flattened. Represents the logprobs activation tensor.
        denom: Tensor of shape [B, T, U] flattened. Denominator of the logprobs activation tensor
            across the entire vocabulary.
        alphas: Alpha variable, contains forward probabilities. A tensor of shape [B, T, U].
        betas: Beta variable, contains backward probabilities. A tensor of shape [B, T, U].
        logll: Log-likelihood of the forward variable, a vector of shape [B].
        xlen: Vector of length B with the actual acoustic sequence lengths in the padded tensor.
        ylen: Vector of length B with the actual target sequence lengths in the padded tensor.
        mlabels: Matrix of shape [B, U+1] (+1 due to the <SOS> token - usually the RNNT blank).
            Contains the padded target transcription that must be predicted.
        minibatch: Int representing the batch size.
        maxT: Maximum possible acoustic sequence length (T in the logprobs tensor).
        maxU: Maximum possible target sequence length (U in the logprobs tensor).
        alphabet_size: The vocabulary dimension V+1 (inclusive of the RNNT blank).
        blank_: Index of the RNNT blank token in the vocabulary.
        fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
            FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
        clamp: Float value. When > 0.0, the gradient is clamped to [-clamp, clamp].

    Updates:
        Kernel inplace updates the following inputs:
        -   grads: Gradients with respect to the log likelihood (logll).
    """
    tid = cuda.threadIdx.x
    idx = tid
    col = cuda.blockIdx.x

    # Decompose the flat block index col into (mb, t, u) coordinates.
    u = col % maxU
    bt = (col - u) // maxU
    t = bt % maxT
    mb = (bt - t) // maxT

    T = xlen[mb]
    U = ylen[mb] + 1
    labels: torch.Tensor = mlabels[mb]

    # Only cells inside the unpadded [T, U] grid contribute gradients.
    if t < T and u < U:
        # Threads stride across the vocabulary dimension.
        while idx < alphabet_size:
            logpk = denom[col] + acts[col * alphabet_size + idx]
            # Base gradient term: posterior occupancy of (t, u) times P(idx | t, u).
            grad = math.exp(alphas[col] + betas[col] + logpk - logll[mb])

            # FastEmit regularization term, applied only where a label emission exists.
            if fastemit_lambda > 0.0 and u < U - 1:
                fastemit_grad = fastemit_lambda * math.exp(
                    alphas[col]
                    + (denom[col] + acts[col * alphabet_size + labels[u]])
                    + betas[col + 1]
                    + logpk
                    - logll[mb]
                )
            else:
                fastemit_grad = 0.0
            grad = grad + fastemit_grad

            # Subtract the path terms that flow through this symbol:
            # final blank at the terminal cell,
            if idx == blank_ and t == T - 1 and u == U - 1:
                grad -= math.exp(alphas[col] + logpk - logll[mb])
            # blank transition to (t+1, u),
            if idx == blank_ and t < T - 1:
                grad -= math.exp(alphas[col] + logpk - logll[mb] + betas[col + maxU])
            # and label emission to (t, u+1) (scaled by 1 + fastemit_lambda).
            if u < U - 1 and idx == labels[u]:
                grad -= math.exp(
                    math.log1p(fastemit_lambda) + alphas[col] + logpk - logll[mb] + betas[col + 1]
                )

            grads[col * alphabet_size + idx] = grad

            # Optional symmetric clamping of the written gradient.
            if clamp > 0.0:
                g = grads[col * alphabet_size + idx]
                g = min(g, clamp)
                g = max(g, -clamp)
                grads[col * alphabet_size + idx] = g

            idx += GPU_RNNT_THREAD_SIZE
def print_args(args):
    """Print arguments (only show the relevant arguments), one NAME=value pair per line."""
    print('\nParameters:')
    # Sort by attribute name so the listing is deterministic.
    for name, value in sorted(vars(args).items()):
        print('\t{}={}'.format(name.upper(), value))
def build_model_from_scratch(args, seq_len, target_length, output_dims, device):
    """
    Build an Agent (embedding + encoder/decoder RNNs + FC heads) from scratch.

    Args:
        args: parsed argument namespace (embedding size, hidden sizes, layer counts, ...).
        seq_len: input sequence length; sizes the decoder FC head when args.use_outputs is set.
        target_length: length of the target sequence the Agent produces.
        output_dims: dimensionality of the encoder-side output.
        device: torch device passed through to the Agent.

    Returns:
        A newly constructed Agent wired from the components built below.
    """
    # Embedding: a linear map when pretrained vectors are used, otherwise a trainable
    # lookup table sized from the vocabulary file (index 0 reserved for padding).
    if args.use_pretrained:
        embedding = nn.Linear(args.emsize, args.emsize)
    else:
        with open(args.vocab_path, 'r') as f:
            vocab = json.load(f)
        embedding = nn.Embedding(len(vocab), args.emsize, padding_idx=0)

    # NOTE(review): naming is swapped by design here — the Encoder class serves as
    # the "decoder" RNN and the Decoder class as the "encoder" RNN.
    decoder_rnn = Encoder(
        args.emsize,
        args.decoder_hidden,
        rnn_type=args.model,
        nlayers=args.decoder_nlayers,
        dropout=args.decoder_drop,
        bidirectional=args.decoder_bi,
    )

    # A bidirectional RNN doubles the feature dimension fed to the FC head.
    attention_dim = 2 * args.decoder_hidden if args.decoder_bi else args.decoder_hidden
    if args.use_outputs:
        # Head consumes the concatenation of all timestep outputs.
        dec_fc_layer_dims = [seq_len * attention_dim, 500, 250, 50, 10]
    else:
        dec_fc_layer_dims = [attention_dim, 10, 5]
    fc_decoder = FC(dec_fc_layer_dims)

    fc_encoder = FC_Encoder([args.encoder_hidden])
    encoder_rnn = Decoder(
        output_dims,
        args.encoder_hidden,
        args.emsize,
        rnn_type=args.model,
        nlayers=args.encoder_nlayers,
        dropout=args.encoder_drop,
    )

    return Agent(
        embedding,
        encoder_rnn,
        fc_encoder,
        decoder_rnn,
        fc_decoder,
        args.batch_size,
        output_dims,
        target_length,
        device,
    )
def _reward(self, action):
    """
    The vehicle is rewarded for driving with high velocity on lanes to the right and
    avoiding collisions, but an additional altruistic penalty is also suffered if any
    controlled vehicle on the merging lane has a low velocity.

    :param action: the action performed
    :return: the reward of the state-action transition
    """
    # Per-action shaping: lane changes (actions 0 and 2) carry a small penalty.
    action_reward = {0: self.LANE_CHANGE_REWARD, 1: 0, 2: self.LANE_CHANGE_REWARD, 3: 0, 4: 0}

    # Ego-vehicle terms: collision penalty, right-lane preference, high-velocity bonus.
    reward = (
        self.COLLISION_REWARD * self.vehicle.crashed
        + self.RIGHT_LANE_REWARD * self.vehicle.lane_index / (len(self.road.lanes) - 2)
        + self.HIGH_VELOCITY_REWARD * self.vehicle.velocity_index / (self.vehicle.SPEED_COUNT - 1)
    )

    # Altruistic term: penalize slowing down controlled vehicles on the merging lane
    # (assumed to be the last lane of the road — confirm against road construction).
    for vehicle in self.road.vehicles:
        if vehicle.lane_index == len(self.road.lanes) - 1 and isinstance(vehicle, ControlledVehicle):
            reward += (
                self.MERGING_VELOCITY_REWARD
                * (vehicle.target_velocity - vehicle.velocity)
                / vehicle.target_velocity
            )

    return reward + action_reward[action]
reward += ((self.MERGING_VELOCITY_REWARD * (vehicle.target_velocity - vehicle.velocity)) / vehicle.target_velocity)
return (reward + action_reward[action])<|docstring|>The vehicle is rewarded for driving with high velocity on lanes to the right and avoiding collisions, but
an additional altruistic penalty is also suffered if any vehicle on the merging lane has a low velocity.
:param action: the action performed
:return: the reward of the state-action transition<|endoftext|> |
26cffbaf8cf770e550f7cccaab07dd1869fb1fcc1343613b2794994c1cd31e76 | def _is_terminal(self):
'\n The episode is over when a collision occurs or when the access ramp has been passed.\n '
return (self.vehicle.crashed or (self.steps >= self.DURATION)) | The episode is over when a collision occurs or when the access ramp has been passed. | highway_env/envs/merge_env.py | _is_terminal | jasonplato/High_SimulationPlatform | 0 | python | def _is_terminal(self):
'\n \n '
return (self.vehicle.crashed or (self.steps >= self.DURATION)) | def _is_terminal(self):
'\n \n '
return (self.vehicle.crashed or (self.steps >= self.DURATION))<|docstring|>The episode is over when a collision occurs or when the access ramp has been passed.<|endoftext|> |
4ebc0f44f73daee4526aff20d3ae13438fa206fd9af8ec7cd3116ef96dd7c3d1 | def make_road(self):
'\n Make a road composed of a straight highway and a merging lane.\n :return: the road\n '
amp = 3.25
str_len = 150
sin_len = 100
width = 4
net = RoadNetwork()
(n, c, s) = (LineType.NONE, LineType.CONTINUOUS, LineType.STRIPED)
net.add_lane('s1', 'inter1', SineLane([0, 0], [sin_len, 0], ((- amp) * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('s1', 'inter1', SineLane([0, width], [sin_len, width], ((- amp) * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('s2', 'inter1', SineLane([0, ((2 * amp) + (2 * width))], [sin_len, ((2 * amp) + (2 * width))], ((- amp) * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('s2', 'inter1', SineLane([0, ((2 * amp) + (3 * width))], [sin_len, ((2 * amp) + (3 * width))], ((- amp) * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, amp], [(sin_len + str_len), amp], line_types=[c, s]))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, (amp + width)], [(sin_len + str_len), (amp + width)], line_types=[s, s]))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, (amp + (2 * width))], [(sin_len + str_len), (amp + (2 * width))], line_types=[s, s]))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, (amp + (3 * width))], [(sin_len + str_len), (amp + (3 * width))], line_types=[s, c]))
net.add_lane('inter2', 'e1', SineLane([(sin_len + str_len), 0], [((2 * sin_len) + str_len), 0], (amp * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter2', 'e1', SineLane([(sin_len + str_len), width], [((2 * sin_len) + str_len), width], (amp * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter2', 'e2', SineLane([(sin_len + str_len), ((2 * width) + (2 * amp))], [((2 * sin_len) + str_len), ((2 * width) + (2 * amp))], (amp * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter2', 'e2', SineLane([(sin_len + str_len), ((3 * width) + (2 * amp))], [((2 * sin_len) + str_len), ((3 * width) + (2 * amp))], (amp * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
road = Road(network=net, np_random=self.np_random)
self.road = road | Make a road composed of a straight highway and a merging lane.
:return: the road | highway_env/envs/merge_env.py | make_road | jasonplato/High_SimulationPlatform | 0 | python | def make_road(self):
'\n Make a road composed of a straight highway and a merging lane.\n :return: the road\n '
amp = 3.25
str_len = 150
sin_len = 100
width = 4
net = RoadNetwork()
(n, c, s) = (LineType.NONE, LineType.CONTINUOUS, LineType.STRIPED)
net.add_lane('s1', 'inter1', SineLane([0, 0], [sin_len, 0], ((- amp) * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('s1', 'inter1', SineLane([0, width], [sin_len, width], ((- amp) * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('s2', 'inter1', SineLane([0, ((2 * amp) + (2 * width))], [sin_len, ((2 * amp) + (2 * width))], ((- amp) * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('s2', 'inter1', SineLane([0, ((2 * amp) + (3 * width))], [sin_len, ((2 * amp) + (3 * width))], ((- amp) * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, amp], [(sin_len + str_len), amp], line_types=[c, s]))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, (amp + width)], [(sin_len + str_len), (amp + width)], line_types=[s, s]))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, (amp + (2 * width))], [(sin_len + str_len), (amp + (2 * width))], line_types=[s, s]))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, (amp + (3 * width))], [(sin_len + str_len), (amp + (3 * width))], line_types=[s, c]))
net.add_lane('inter2', 'e1', SineLane([(sin_len + str_len), 0], [((2 * sin_len) + str_len), 0], (amp * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter2', 'e1', SineLane([(sin_len + str_len), width], [((2 * sin_len) + str_len), width], (amp * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter2', 'e2', SineLane([(sin_len + str_len), ((2 * width) + (2 * amp))], [((2 * sin_len) + str_len), ((2 * width) + (2 * amp))], (amp * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter2', 'e2', SineLane([(sin_len + str_len), ((3 * width) + (2 * amp))], [((2 * sin_len) + str_len), ((3 * width) + (2 * amp))], (amp * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
road = Road(network=net, np_random=self.np_random)
self.road = road | def make_road(self):
'\n Make a road composed of a straight highway and a merging lane.\n :return: the road\n '
amp = 3.25
str_len = 150
sin_len = 100
width = 4
net = RoadNetwork()
(n, c, s) = (LineType.NONE, LineType.CONTINUOUS, LineType.STRIPED)
net.add_lane('s1', 'inter1', SineLane([0, 0], [sin_len, 0], ((- amp) * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('s1', 'inter1', SineLane([0, width], [sin_len, width], ((- amp) * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('s2', 'inter1', SineLane([0, ((2 * amp) + (2 * width))], [sin_len, ((2 * amp) + (2 * width))], ((- amp) * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('s2', 'inter1', SineLane([0, ((2 * amp) + (3 * width))], [sin_len, ((2 * amp) + (3 * width))], ((- amp) * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, amp], [(sin_len + str_len), amp], line_types=[c, s]))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, (amp + width)], [(sin_len + str_len), (amp + width)], line_types=[s, s]))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, (amp + (2 * width))], [(sin_len + str_len), (amp + (2 * width))], line_types=[s, s]))
net.add_lane('inter1', 'inter2', StraightLane([sin_len, (amp + (3 * width))], [(sin_len + str_len), (amp + (3 * width))], line_types=[s, c]))
net.add_lane('inter2', 'e1', SineLane([(sin_len + str_len), 0], [((2 * sin_len) + str_len), 0], (amp * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter2', 'e1', SineLane([(sin_len + str_len), width], [((2 * sin_len) + str_len), width], (amp * 1), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter2', 'e2', SineLane([(sin_len + str_len), ((2 * width) + (2 * amp))], [((2 * sin_len) + str_len), ((2 * width) + (2 * amp))], (amp * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
net.add_lane('inter2', 'e2', SineLane([(sin_len + str_len), ((3 * width) + (2 * amp))], [((2 * sin_len) + str_len), ((3 * width) + (2 * amp))], (amp * (- 1)), (np.pi / sin_len), (np.pi / 2), line_types=[c, c], forbidden=False))
road = Road(network=net, np_random=self.np_random)
self.road = road<|docstring|>Make a road composed of a straight highway and a merging lane.
:return: the road<|endoftext|> |
9369debabd80db59a1b509a164c521deb52b27b36f796b69ad15bb94a6c1cf56 | def make_vehicles(self):
'\n Populate a road with several vehicles on the highway and on the merging lane, as well as an ego-vehicle.\n :return: the ego-vehicle\n '
max_l = 300
road = self.road
other_vehicles_type = utils.class_from_path(self.config['other_vehicles_type'])
car_number_each_lane = 2
reset_lane = ('s1', 'inter1', 1)
ego_vehicle = None
birth_place = [('s1', 'inter1', 0), ('s1', 'inter1', 1), ('s2', 'inter1', 0), ('s2', 'inter1', 1)]
destinations = ['e1', 'e2']
position_deviation = 10
velocity_deviation = 2
for l in self.road.network.LANES:
lane = road.network.get_lane(l)
cars_on_lane = car_number_each_lane
reset_position = None
if (l == reset_lane):
cars_on_lane += 1
reset_position = random.choice(range(1, car_number_each_lane))
for i in range(cars_on_lane):
if ((i == reset_position) and (not ego_vehicle)):
ego_lane = self.road.network.get_lane(('s1', 'inter1', 1))
ego_vehicle = IDMVehicle(self.road, ego_lane.position(20, 0), velocity=10, heading=ego_lane.heading_at(0)).plan_route_to('e2')
ego_vehicle.id = 0
road.vehicles.append(ego_vehicle)
self.vehicle = ego_vehicle
else:
car = other_vehicles_type.make_on_lane(road, birth_place[np.random.randint(0, 4)], longitudinal=(0 + (np.random.randint(1, 5) * position_deviation)), velocity=(5 + (np.random.randint(1, 5) * velocity_deviation)))
if (self.config['other_vehicles_destination'] is not None):
destination = destinations[self.config['other_vehicles_destination']]
else:
destination = destinations[np.random.randint(0, 2)]
car.plan_route_to(destination)
car.randomize_behavior()
road.vehicles.append(car)
lane.vehicles.append(car)
for i in range(self.road.network.LANES_NUMBER):
lane = road.network.get_lane(self.road.network.LANES[i])
lane.vehicles = sorted(lane.vehicles, key=(lambda x: lane.local_coordinates(x.position)[0]))
for (j, v) in enumerate(lane.vehicles):
v.vehicle_index_in_line = j | Populate a road with several vehicles on the highway and on the merging lane, as well as an ego-vehicle.
:return: the ego-vehicle | highway_env/envs/merge_env.py | make_vehicles | jasonplato/High_SimulationPlatform | 0 | python | def make_vehicles(self):
'\n Populate a road with several vehicles on the highway and on the merging lane, as well as an ego-vehicle.\n :return: the ego-vehicle\n '
max_l = 300
road = self.road
other_vehicles_type = utils.class_from_path(self.config['other_vehicles_type'])
car_number_each_lane = 2
reset_lane = ('s1', 'inter1', 1)
ego_vehicle = None
birth_place = [('s1', 'inter1', 0), ('s1', 'inter1', 1), ('s2', 'inter1', 0), ('s2', 'inter1', 1)]
destinations = ['e1', 'e2']
position_deviation = 10
velocity_deviation = 2
for l in self.road.network.LANES:
lane = road.network.get_lane(l)
cars_on_lane = car_number_each_lane
reset_position = None
if (l == reset_lane):
cars_on_lane += 1
reset_position = random.choice(range(1, car_number_each_lane))
for i in range(cars_on_lane):
if ((i == reset_position) and (not ego_vehicle)):
ego_lane = self.road.network.get_lane(('s1', 'inter1', 1))
ego_vehicle = IDMVehicle(self.road, ego_lane.position(20, 0), velocity=10, heading=ego_lane.heading_at(0)).plan_route_to('e2')
ego_vehicle.id = 0
road.vehicles.append(ego_vehicle)
self.vehicle = ego_vehicle
else:
car = other_vehicles_type.make_on_lane(road, birth_place[np.random.randint(0, 4)], longitudinal=(0 + (np.random.randint(1, 5) * position_deviation)), velocity=(5 + (np.random.randint(1, 5) * velocity_deviation)))
if (self.config['other_vehicles_destination'] is not None):
destination = destinations[self.config['other_vehicles_destination']]
else:
destination = destinations[np.random.randint(0, 2)]
car.plan_route_to(destination)
car.randomize_behavior()
road.vehicles.append(car)
lane.vehicles.append(car)
for i in range(self.road.network.LANES_NUMBER):
lane = road.network.get_lane(self.road.network.LANES[i])
lane.vehicles = sorted(lane.vehicles, key=(lambda x: lane.local_coordinates(x.position)[0]))
for (j, v) in enumerate(lane.vehicles):
v.vehicle_index_in_line = j | def make_vehicles(self):
'\n Populate a road with several vehicles on the highway and on the merging lane, as well as an ego-vehicle.\n :return: the ego-vehicle\n '
max_l = 300
road = self.road
other_vehicles_type = utils.class_from_path(self.config['other_vehicles_type'])
car_number_each_lane = 2
reset_lane = ('s1', 'inter1', 1)
ego_vehicle = None
birth_place = [('s1', 'inter1', 0), ('s1', 'inter1', 1), ('s2', 'inter1', 0), ('s2', 'inter1', 1)]
destinations = ['e1', 'e2']
position_deviation = 10
velocity_deviation = 2
for l in self.road.network.LANES:
lane = road.network.get_lane(l)
cars_on_lane = car_number_each_lane
reset_position = None
if (l == reset_lane):
cars_on_lane += 1
reset_position = random.choice(range(1, car_number_each_lane))
for i in range(cars_on_lane):
if ((i == reset_position) and (not ego_vehicle)):
ego_lane = self.road.network.get_lane(('s1', 'inter1', 1))
ego_vehicle = IDMVehicle(self.road, ego_lane.position(20, 0), velocity=10, heading=ego_lane.heading_at(0)).plan_route_to('e2')
ego_vehicle.id = 0
road.vehicles.append(ego_vehicle)
self.vehicle = ego_vehicle
else:
car = other_vehicles_type.make_on_lane(road, birth_place[np.random.randint(0, 4)], longitudinal=(0 + (np.random.randint(1, 5) * position_deviation)), velocity=(5 + (np.random.randint(1, 5) * velocity_deviation)))
if (self.config['other_vehicles_destination'] is not None):
destination = destinations[self.config['other_vehicles_destination']]
else:
destination = destinations[np.random.randint(0, 2)]
car.plan_route_to(destination)
car.randomize_behavior()
road.vehicles.append(car)
lane.vehicles.append(car)
for i in range(self.road.network.LANES_NUMBER):
lane = road.network.get_lane(self.road.network.LANES[i])
lane.vehicles = sorted(lane.vehicles, key=(lambda x: lane.local_coordinates(x.position)[0]))
for (j, v) in enumerate(lane.vehicles):
v.vehicle_index_in_line = j<|docstring|>Populate a road with several vehicles on the highway and on the merging lane, as well as an ego-vehicle.
:return: the ego-vehicle<|endoftext|> |
fa7d359b84e77a7572eb9a4a2952531d235fe21e4b5321ea53b29cc97e20fa17 | def fake_step(self):
'\n :return:\n '
for k in range(int((self.SIMULATION_FREQUENCY // self.POLICY_FREQUENCY))):
self.road.act()
self.road.step((1 / self.SIMULATION_FREQUENCY))
self._automatic_rendering()
if (self.done or self._is_terminal()):
break
self.enable_auto_render = False
self.steps += 1
from highway_env.extractors import Extractor
extractor = Extractor()
extractor_features = extractor.FeatureExtractor(self.road.vehicles, 0, 1)
for i in range(1):
birth_place = [('s1', 'inter1', 0), ('s1', 'inter1', 1), ('s2', 'inter1', 0), ('s2', 'inter1', 1)]
destinations = ['e1', 'e2']
position_deviation = 5
velocity_deviation = 1.5
other_vehicles_type = utils.class_from_path(self.config['other_vehicles_type'])
birth = birth_place[np.random.randint(0, 4)]
lane = self.road.network.get_lane(birth)
car = other_vehicles_type.make_on_lane(self.road, birth, longitudinal=(0 + (np.random.randint(1, 5) * position_deviation)), velocity=(5 + (np.random.randint(1, 10) * velocity_deviation)))
if (self.config['incoming_vehicle_destination'] is not None):
destination = destinations[self.config['incoming_vehicle_destination']]
else:
destination = destinations[np.random.randint(0, 2)]
car.plan_route_to(destination)
car.randomize_behavior()
self.road.vehicles.append(car)
lane.vehicles.append(car)
terminal = self._is_terminal()
info = {}
return (terminal, extractor_features) | :return: | highway_env/envs/merge_env.py | fake_step | jasonplato/High_SimulationPlatform | 0 | python | def fake_step(self):
'\n \n '
for k in range(int((self.SIMULATION_FREQUENCY // self.POLICY_FREQUENCY))):
self.road.act()
self.road.step((1 / self.SIMULATION_FREQUENCY))
self._automatic_rendering()
if (self.done or self._is_terminal()):
break
self.enable_auto_render = False
self.steps += 1
from highway_env.extractors import Extractor
extractor = Extractor()
extractor_features = extractor.FeatureExtractor(self.road.vehicles, 0, 1)
for i in range(1):
birth_place = [('s1', 'inter1', 0), ('s1', 'inter1', 1), ('s2', 'inter1', 0), ('s2', 'inter1', 1)]
destinations = ['e1', 'e2']
position_deviation = 5
velocity_deviation = 1.5
other_vehicles_type = utils.class_from_path(self.config['other_vehicles_type'])
birth = birth_place[np.random.randint(0, 4)]
lane = self.road.network.get_lane(birth)
car = other_vehicles_type.make_on_lane(self.road, birth, longitudinal=(0 + (np.random.randint(1, 5) * position_deviation)), velocity=(5 + (np.random.randint(1, 10) * velocity_deviation)))
if (self.config['incoming_vehicle_destination'] is not None):
destination = destinations[self.config['incoming_vehicle_destination']]
else:
destination = destinations[np.random.randint(0, 2)]
car.plan_route_to(destination)
car.randomize_behavior()
self.road.vehicles.append(car)
lane.vehicles.append(car)
terminal = self._is_terminal()
info = {}
return (terminal, extractor_features) | def fake_step(self):
'\n \n '
for k in range(int((self.SIMULATION_FREQUENCY // self.POLICY_FREQUENCY))):
self.road.act()
self.road.step((1 / self.SIMULATION_FREQUENCY))
self._automatic_rendering()
if (self.done or self._is_terminal()):
break
self.enable_auto_render = False
self.steps += 1
from highway_env.extractors import Extractor
extractor = Extractor()
extractor_features = extractor.FeatureExtractor(self.road.vehicles, 0, 1)
for i in range(1):
birth_place = [('s1', 'inter1', 0), ('s1', 'inter1', 1), ('s2', 'inter1', 0), ('s2', 'inter1', 1)]
destinations = ['e1', 'e2']
position_deviation = 5
velocity_deviation = 1.5
other_vehicles_type = utils.class_from_path(self.config['other_vehicles_type'])
birth = birth_place[np.random.randint(0, 4)]
lane = self.road.network.get_lane(birth)
car = other_vehicles_type.make_on_lane(self.road, birth, longitudinal=(0 + (np.random.randint(1, 5) * position_deviation)), velocity=(5 + (np.random.randint(1, 10) * velocity_deviation)))
if (self.config['incoming_vehicle_destination'] is not None):
destination = destinations[self.config['incoming_vehicle_destination']]
else:
destination = destinations[np.random.randint(0, 2)]
car.plan_route_to(destination)
car.randomize_behavior()
self.road.vehicles.append(car)
lane.vehicles.append(car)
terminal = self._is_terminal()
info = {}
return (terminal, extractor_features)<|docstring|>:return:<|endoftext|> |
28487f9fa39002d71b08279468a48de4bf26d60b04f418344044fb24627efa64 | def clean_whitespace(text):
'\n Remove any extra whitespace and line breaks as needed.\n '
import re
text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
text = text.strip()
text = re.sub(' +', ' ', text)
return text | Remove any extra whitespace and line breaks as needed. | chatterbot/utils.py | clean_whitespace | 12520054/pybot | 0 | python | def clean_whitespace(text):
'\n \n '
import re
text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
text = text.strip()
text = re.sub(' +', ' ', text)
return text | def clean_whitespace(text):
'\n \n '
import re
text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
text = text.strip()
text = re.sub(' +', ' ', text)
return text<|docstring|>Remove any extra whitespace and line breaks as needed.<|endoftext|> |
97e34431f7c7c4feca293df9836b0aa8d73f575503f69cbea63af3bd620c7d6c | def clean(text):
'\n A function for cleaning a string of text.\n Returns valid ASCII characters.\n '
import unicodedata
import sys
text = clean_whitespace(text)
if (sys.version_info[0] < 3):
from HTMLParser import HTMLParser
parser = HTMLParser()
text = parser.unescape(text)
else:
import html
text = html.unescape(text)
if (sys.version_info[0] < 3):
text = unicode(text)
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8')
return str(text) | A function for cleaning a string of text.
Returns valid ASCII characters. | chatterbot/utils.py | clean | 12520054/pybot | 0 | python | def clean(text):
'\n A function for cleaning a string of text.\n Returns valid ASCII characters.\n '
import unicodedata
import sys
text = clean_whitespace(text)
if (sys.version_info[0] < 3):
from HTMLParser import HTMLParser
parser = HTMLParser()
text = parser.unescape(text)
else:
import html
text = html.unescape(text)
if (sys.version_info[0] < 3):
text = unicode(text)
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8')
return str(text) | def clean(text):
'\n A function for cleaning a string of text.\n Returns valid ASCII characters.\n '
import unicodedata
import sys
text = clean_whitespace(text)
if (sys.version_info[0] < 3):
from HTMLParser import HTMLParser
parser = HTMLParser()
text = parser.unescape(text)
else:
import html
text = html.unescape(text)
if (sys.version_info[0] < 3):
text = unicode(text)
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8')
return str(text)<|docstring|>A function for cleaning a string of text.
Returns valid ASCII characters.<|endoftext|> |
09710e8c323f5f23c977342a74341ed724b459d584dd1b5cf90e84f8f124bf88 | def import_module(dotted_path):
'\n Imports the specified module based on the\n dot notated import path for the module.\n '
import importlib
module_parts = dotted_path.split('.')
module_path = '.'.join(module_parts[:(- 1)])
module = importlib.import_module(module_path)
return getattr(module, module_parts[(- 1)]) | Imports the specified module based on the
dot notated import path for the module. | chatterbot/utils.py | import_module | 12520054/pybot | 0 | python | def import_module(dotted_path):
'\n Imports the specified module based on the\n dot notated import path for the module.\n '
import importlib
module_parts = dotted_path.split('.')
module_path = '.'.join(module_parts[:(- 1)])
module = importlib.import_module(module_path)
return getattr(module, module_parts[(- 1)]) | def import_module(dotted_path):
'\n Imports the specified module based on the\n dot notated import path for the module.\n '
import importlib
module_parts = dotted_path.split('.')
module_path = '.'.join(module_parts[:(- 1)])
module = importlib.import_module(module_path)
return getattr(module, module_parts[(- 1)])<|docstring|>Imports the specified module based on the
dot notated import path for the module.<|endoftext|> |
06e358de72733c71eeae9effc01212e8e8e5bcc38099dc0752e3aeb96c210c5a | def input_function():
"\n Normalizes reading input between python 2 and 3.\n The function 'raw_input' becomes 'input' in Python 3.\n "
import sys
if (sys.version_info[0] < 3):
user_input = str(raw_input())
if user_input:
user_input = user_input.decode('utf-8')
else:
user_input = input()
return user_input | Normalizes reading input between python 2 and 3.
The function 'raw_input' becomes 'input' in Python 3. | chatterbot/utils.py | input_function | 12520054/pybot | 0 | python | def input_function():
"\n Normalizes reading input between python 2 and 3.\n The function 'raw_input' becomes 'input' in Python 3.\n "
import sys
if (sys.version_info[0] < 3):
user_input = str(raw_input())
if user_input:
user_input = user_input.decode('utf-8')
else:
user_input = input()
return user_input | def input_function():
"\n Normalizes reading input between python 2 and 3.\n The function 'raw_input' becomes 'input' in Python 3.\n "
import sys
if (sys.version_info[0] < 3):
user_input = str(raw_input())
if user_input:
user_input = user_input.decode('utf-8')
else:
user_input = input()
return user_input<|docstring|>Normalizes reading input between python 2 and 3.
The function 'raw_input' becomes 'input' in Python 3.<|endoftext|> |
3b349066b922f7484661d68bdcb21d2c170239c345340edd1f8ca6bc6bef13d1 | def nltk_download_corpus(corpus_name):
'\n Download the specified NLTK corpus file\n unless it has already been downloaded.\n\n Returns True if the corpus needed to be downloaded.\n '
from nltk.data import find
from nltk import download
zip_file = '{}.zip'.format(corpus_name)
downloaded = False
try:
find(zip_file)
except LookupError:
download(corpus_name)
downloaded = True
return downloaded | Download the specified NLTK corpus file
unless it has already been downloaded.
Returns True if the corpus needed to be downloaded. | chatterbot/utils.py | nltk_download_corpus | 12520054/pybot | 0 | python | def nltk_download_corpus(corpus_name):
'\n Download the specified NLTK corpus file\n unless it has already been downloaded.\n\n Returns True if the corpus needed to be downloaded.\n '
from nltk.data import find
from nltk import download
zip_file = '{}.zip'.format(corpus_name)
downloaded = False
try:
find(zip_file)
except LookupError:
download(corpus_name)
downloaded = True
return downloaded | def nltk_download_corpus(corpus_name):
'\n Download the specified NLTK corpus file\n unless it has already been downloaded.\n\n Returns True if the corpus needed to be downloaded.\n '
from nltk.data import find
from nltk import download
zip_file = '{}.zip'.format(corpus_name)
downloaded = False
try:
find(zip_file)
except LookupError:
download(corpus_name)
downloaded = True
return downloaded<|docstring|>Download the specified NLTK corpus file
unless it has already been downloaded.
Returns True if the corpus needed to be downloaded.<|endoftext|> |
44cddc5eb1ab6b6e75c57c5dc10495a0a547223bf0fe87e14137ac3b6f64bd65 | def remove_stopwords(tokens, language):
'\n Takes a language (i.e. \'english\'), and a set of word tokens.\n Returns the tokenized text with any stopwords removed.\n Stop words are words like "is, the, a, ..."\n '
from nltk.corpus import stopwords
stop_words = stopwords.words(language)
tokens = (set(tokens) - set(stop_words))
return tokens | Takes a language (i.e. 'english'), and a set of word tokens.
Returns the tokenized text with any stopwords removed.
Stop words are words like "is, the, a, ..." | chatterbot/utils.py | remove_stopwords | 12520054/pybot | 0 | python | def remove_stopwords(tokens, language):
'\n Takes a language (i.e. \'english\'), and a set of word tokens.\n Returns the tokenized text with any stopwords removed.\n Stop words are words like "is, the, a, ..."\n '
from nltk.corpus import stopwords
stop_words = stopwords.words(language)
tokens = (set(tokens) - set(stop_words))
return tokens | def remove_stopwords(tokens, language):
'\n Takes a language (i.e. \'english\'), and a set of word tokens.\n Returns the tokenized text with any stopwords removed.\n Stop words are words like "is, the, a, ..."\n '
from nltk.corpus import stopwords
stop_words = stopwords.words(language)
tokens = (set(tokens) - set(stop_words))
return tokens<|docstring|>Takes a language (i.e. 'english'), and a set of word tokens.
Returns the tokenized text with any stopwords removed.
Stop words are words like "is, the, a, ..."<|endoftext|> |
5e1b63728e7e2aaba65a5a9d4a47c34b07d0990c33555a0d117499313387ee00 | def to_integer_lattice(f: SetObjective, S: AbstractSet[int]) -> NDArray[int]:
'\n Convert a set submodular solution to an integer lattice solution.\n :param f: set submodular function\n :param S: set submodular solution\n '
n = f.original_n
counter = Counter(((e % n) for e in S))
x = np.zeros((n,), dtype=int)
for (e, c) in counter.items():
x[e] = c
return x | Convert a set submodular solution to an integer lattice solution.
:param f: set submodular function
:param S: set submodular solution | python/benchmark/utils/bridge/to_integer_lattice.py | to_integer_lattice | jkomyno/lattice-submodular-maximization | 1 | python | def to_integer_lattice(f: SetObjective, S: AbstractSet[int]) -> NDArray[int]:
'\n Convert a set submodular solution to an integer lattice solution.\n :param f: set submodular function\n :param S: set submodular solution\n '
n = f.original_n
counter = Counter(((e % n) for e in S))
x = np.zeros((n,), dtype=int)
for (e, c) in counter.items():
x[e] = c
return x | def to_integer_lattice(f: SetObjective, S: AbstractSet[int]) -> NDArray[int]:
'\n Convert a set submodular solution to an integer lattice solution.\n :param f: set submodular function\n :param S: set submodular solution\n '
n = f.original_n
counter = Counter(((e % n) for e in S))
x = np.zeros((n,), dtype=int)
for (e, c) in counter.items():
x[e] = c
return x<|docstring|>Convert a set submodular solution to an integer lattice solution.
:param f: set submodular function
:param S: set submodular solution<|endoftext|> |
b98b36a26394a122da666139a3f0fd896a145f29a84f51646f333591cb74cb0e | def get_gnomonic_projection(figure, hpx_map, **kwargs):
'\n Returns an array containing the Gnomonic projection of the provided Healpix map.\n\n This is equivalent to hp.gnomview of Healpy BUT the projected array is NOT plotted in the figure, so you can\n plot it later on.\n\n :param figure: a matplotlib Figure\n :param hpx_map: the healpix map\n :param **kwargs: keywords accepted by hp.gnomview\n :return: the array containing the projection.\n '
defaults = {'coord': 'C', 'rot': None, 'format': '%g', 'flip': 'astro', 'xsize': 200, 'ysize': None, 'reso': 1.5, 'nest': False, 'min': None, 'max': None, 'cmap': None, 'norm': None}
for (key, default_value) in list(defaults.items()):
if (key not in kwargs):
kwargs[key] = default_value
extent = (0.05, 0.05, 0.9, 0.9)
ax = PA.HpxGnomonicAxes(figure, extent, coord=kwargs['coord'], rot=kwargs['rot'], format=kwargs['format'], flipconv=kwargs['flip'])
with np.warnings.catch_warnings():
np.warnings.filterwarnings('ignore')
img = ax.projmap(hpx_map, nest=kwargs['nest'], coord=kwargs['coord'], vmin=kwargs['min'], vmax=kwargs['max'], xsize=kwargs['xsize'], ysize=kwargs['ysize'], reso=kwargs['reso'], cmap=kwargs['cmap'], norm=kwargs['norm'])
return img | Returns an array containing the Gnomonic projection of the provided Healpix map.
This is equivalent to hp.gnomview of Healpy BUT the projected array is NOT plotted in the figure, so you can
plot it later on.
:param figure: a matplotlib Figure
:param hpx_map: the healpix map
:param **kwargs: keywords accepted by hp.gnomview
:return: the array containing the projection. | hawc_hal/healpix_handling/gnomonic_projection.py | get_gnomonic_projection | torresramiro350/hawc_hal | 7 | python | def get_gnomonic_projection(figure, hpx_map, **kwargs):
'\n Returns an array containing the Gnomonic projection of the provided Healpix map.\n\n This is equivalent to hp.gnomview of Healpy BUT the projected array is NOT plotted in the figure, so you can\n plot it later on.\n\n :param figure: a matplotlib Figure\n :param hpx_map: the healpix map\n :param **kwargs: keywords accepted by hp.gnomview\n :return: the array containing the projection.\n '
defaults = {'coord': 'C', 'rot': None, 'format': '%g', 'flip': 'astro', 'xsize': 200, 'ysize': None, 'reso': 1.5, 'nest': False, 'min': None, 'max': None, 'cmap': None, 'norm': None}
for (key, default_value) in list(defaults.items()):
if (key not in kwargs):
kwargs[key] = default_value
extent = (0.05, 0.05, 0.9, 0.9)
ax = PA.HpxGnomonicAxes(figure, extent, coord=kwargs['coord'], rot=kwargs['rot'], format=kwargs['format'], flipconv=kwargs['flip'])
with np.warnings.catch_warnings():
np.warnings.filterwarnings('ignore')
img = ax.projmap(hpx_map, nest=kwargs['nest'], coord=kwargs['coord'], vmin=kwargs['min'], vmax=kwargs['max'], xsize=kwargs['xsize'], ysize=kwargs['ysize'], reso=kwargs['reso'], cmap=kwargs['cmap'], norm=kwargs['norm'])
return img | def get_gnomonic_projection(figure, hpx_map, **kwargs):
'\n Returns an array containing the Gnomonic projection of the provided Healpix map.\n\n This is equivalent to hp.gnomview of Healpy BUT the projected array is NOT plotted in the figure, so you can\n plot it later on.\n\n :param figure: a matplotlib Figure\n :param hpx_map: the healpix map\n :param **kwargs: keywords accepted by hp.gnomview\n :return: the array containing the projection.\n '
defaults = {'coord': 'C', 'rot': None, 'format': '%g', 'flip': 'astro', 'xsize': 200, 'ysize': None, 'reso': 1.5, 'nest': False, 'min': None, 'max': None, 'cmap': None, 'norm': None}
for (key, default_value) in list(defaults.items()):
if (key not in kwargs):
kwargs[key] = default_value
extent = (0.05, 0.05, 0.9, 0.9)
ax = PA.HpxGnomonicAxes(figure, extent, coord=kwargs['coord'], rot=kwargs['rot'], format=kwargs['format'], flipconv=kwargs['flip'])
with np.warnings.catch_warnings():
np.warnings.filterwarnings('ignore')
img = ax.projmap(hpx_map, nest=kwargs['nest'], coord=kwargs['coord'], vmin=kwargs['min'], vmax=kwargs['max'], xsize=kwargs['xsize'], ysize=kwargs['ysize'], reso=kwargs['reso'], cmap=kwargs['cmap'], norm=kwargs['norm'])
return img<|docstring|>Returns an array containing the Gnomonic projection of the provided Healpix map.
This is equivalent to hp.gnomview of Healpy BUT the projected array is NOT plotted in the figure, so you can
plot it later on.
:param figure: a matplotlib Figure
:param hpx_map: the healpix map
:param **kwargs: keywords accepted by hp.gnomview
:return: the array containing the projection.<|endoftext|> |
0e643e538cb11b6378a56b1eecc928cc764bcd73ebe603d0eeea00403029bf46 | def log_string(user_msg):
' Prints the desired string to the shell, preceded by the date and time.\n '
with print_lock:
print(((datetime.now().strftime('%H.%M.%S.%f') + ' ') + user_msg)) | Prints the desired string to the shell, preceded by the date and time. | soccer_hardware/src/soccer_hardware/utility.py | log_string | ghsecuritylab/soccer_ws | 56 | python | def log_string(user_msg):
' \n '
with print_lock:
print(((datetime.now().strftime('%H.%M.%S.%f') + ' ') + user_msg)) | def log_string(user_msg):
' \n '
with print_lock:
print(((datetime.now().strftime('%H.%M.%S.%f') + ' ') + user_msg))<|docstring|>Prints the desired string to the shell, preceded by the date and time.<|endoftext|> |
4b73bd50cc90cbe16714034a4903f4f168e6f21f8e9286a02429258c54046742 | def mask_to_rgb(mask):
'\n Converts a mask to RGB Format\n '
colours = visualize.random_colors(mask.shape[2])
rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
for i in range(mask.shape[2]):
for c in range(3):
rgb_mask[(:, :, c)] = np.where((mask[(:, :, i)] != 0), int((colours[i][c] * 255)), rgb_mask[(:, :, c)])
return rgb_mask | Converts a mask to RGB Format | UPGen/utils.py | mask_to_rgb | HenryLiangzy/COMP9517_Group | 21 | python | def mask_to_rgb(mask):
'\n \n '
colours = visualize.random_colors(mask.shape[2])
rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
for i in range(mask.shape[2]):
for c in range(3):
rgb_mask[(:, :, c)] = np.where((mask[(:, :, i)] != 0), int((colours[i][c] * 255)), rgb_mask[(:, :, c)])
return rgb_mask | def mask_to_rgb(mask):
'\n \n '
colours = visualize.random_colors(mask.shape[2])
rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
for i in range(mask.shape[2]):
for c in range(3):
rgb_mask[(:, :, c)] = np.where((mask[(:, :, i)] != 0), int((colours[i][c] * 255)), rgb_mask[(:, :, c)])
return rgb_mask<|docstring|>Converts a mask to RGB Format<|endoftext|> |
2c62ddca20b33ccd2bbcff7161b5b91bedb8696719097f375571c70461b9f754 | def mask_to_outlined(mask):
'\n Converts a mask to RGB Format\n '
colours = visualize.random_colors(mask.shape[2])
rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
for i in range(mask.shape[2]):
for c in range(3):
rgb_mask[(:, :, c)] = np.where((mask[(:, :, i)] != 0), int((colours[i][c] * 255)), rgb_mask[(:, :, c)])
for i in range(mask.shape[2]):
threshold = mask[(:, :, i)]
threshold[(threshold != 0)] = 255
(_, contours, hierarchy) = cv2.findContours(threshold.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if (len(contours) > 0):
cv2.drawContours(rgb_mask, [contours[0]], 0, (255, 255, 255), thickness=1)
return rgb_mask | Converts a mask to RGB Format | UPGen/utils.py | mask_to_outlined | HenryLiangzy/COMP9517_Group | 21 | python | def mask_to_outlined(mask):
'\n \n '
colours = visualize.random_colors(mask.shape[2])
rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
for i in range(mask.shape[2]):
for c in range(3):
rgb_mask[(:, :, c)] = np.where((mask[(:, :, i)] != 0), int((colours[i][c] * 255)), rgb_mask[(:, :, c)])
for i in range(mask.shape[2]):
threshold = mask[(:, :, i)]
threshold[(threshold != 0)] = 255
(_, contours, hierarchy) = cv2.findContours(threshold.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if (len(contours) > 0):
cv2.drawContours(rgb_mask, [contours[0]], 0, (255, 255, 255), thickness=1)
return rgb_mask | def mask_to_outlined(mask):
'\n \n '
colours = visualize.random_colors(mask.shape[2])
rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
for i in range(mask.shape[2]):
for c in range(3):
rgb_mask[(:, :, c)] = np.where((mask[(:, :, i)] != 0), int((colours[i][c] * 255)), rgb_mask[(:, :, c)])
for i in range(mask.shape[2]):
threshold = mask[(:, :, i)]
threshold[(threshold != 0)] = 255
(_, contours, hierarchy) = cv2.findContours(threshold.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if (len(contours) > 0):
cv2.drawContours(rgb_mask, [contours[0]], 0, (255, 255, 255), thickness=1)
return rgb_mask<|docstring|>Converts a mask to RGB Format<|endoftext|> |
6d92f67d3edaa3d2e6841c2be83bfa0fab6d7003202257c436ab9c8bde942e83 | def log_line(self, *args):
'\n Write each thing to the log file\n '
self.time_stamp()
for log_item in args:
self.log_file.write((str(log_item) + ' '))
self.log_file.write('\n')
self.flush() | Write each thing to the log file | UPGen/utils.py | log_line | HenryLiangzy/COMP9517_Group | 21 | python | def log_line(self, *args):
'\n \n '
self.time_stamp()
for log_item in args:
self.log_file.write((str(log_item) + ' '))
self.log_file.write('\n')
self.flush() | def log_line(self, *args):
'\n \n '
self.time_stamp()
for log_item in args:
self.log_file.write((str(log_item) + ' '))
self.log_file.write('\n')
self.flush()<|docstring|>Write each thing to the log file<|endoftext|> |
a0b543a3c2b39b101f250a1a03d3be2ce9eeaf19b5f482ab6c64b1a3a90abd16 | def log(self, *args):
'\n Write each thing to the log file\n '
self.time_stamp()
for log_item in args:
self.log_file.write((str(log_item) + ' '))
self.flush() | Write each thing to the log file | UPGen/utils.py | log | HenryLiangzy/COMP9517_Group | 21 | python | def log(self, *args):
'\n \n '
self.time_stamp()
for log_item in args:
self.log_file.write((str(log_item) + ' '))
self.flush() | def log(self, *args):
'\n \n '
self.time_stamp()
for log_item in args:
self.log_file.write((str(log_item) + ' '))
self.flush()<|docstring|>Write each thing to the log file<|endoftext|> |
7af3ce263a7d3ffe74049e5da8b06e8ee5c63aa37406f3035c2024366a15b194 | def add_sidechain_vector(g: nx.Graph, scale: bool=True, reverse: bool=False):
'Adds vector from node to average position of sidechain atoms.\n\n We compute the mean of the sidechain atoms for each node. For this we use the ``rgroup_df`` dataframe.\n If the graph does not contain the ``rgroup_df`` dataframe, we compute it from the ``raw_pdb_df``.\n If scale, we scale the vector to the unit vector. If reverse is True,\n we reverse the vector (``sidechain - node``). If reverse is false (default) we compute (``node - sidechain``).\n\n :param g: Graph to add vector to.\n :type g: nx.Graph\n :param scale: Scale vector to unit vector. Defaults to ``True``.\n :type scale: bool\n :param reverse: Reverse vector. Defaults to ``False``.\n :type reverse: bool\n '
if ('rgroup_df' not in g.graph.keys()):
g.graph['rgroup_df'] = compute_rgroup_dataframe(g.graph['raw_pdb_df'])
sc_centroid = g.graph['rgroup_df'].groupby('node_id').mean()
for (n, d) in g.nodes(data=True):
if (d['residue_name'] == 'GLY'):
vec = np.array([0, 0, 0])
else:
if reverse:
vec = (d['coords'] - np.array(sc_centroid.loc[n][['x_coord', 'y_coord', 'z_coord']]))
else:
vec = (np.array(sc_centroid.loc[n][['x_coord', 'y_coord', 'z_coord']]) - d['coords'])
if scale:
vec = (vec / np.linalg.norm(vec))
d['sidechain_vector'] = vec | Adds vector from node to average position of sidechain atoms.
We compute the mean of the sidechain atoms for each node. For this we use the ``rgroup_df`` dataframe.
If the graph does not contain the ``rgroup_df`` dataframe, we compute it from the ``raw_pdb_df``.
If scale, we scale the vector to the unit vector. If reverse is True,
we reverse the vector (``sidechain - node``). If reverse is false (default) we compute (``node - sidechain``).
:param g: Graph to add vector to.
:type g: nx.Graph
:param scale: Scale vector to unit vector. Defaults to ``True``.
:type scale: bool
:param reverse: Reverse vector. Defaults to ``False``.
:type reverse: bool | graphein/protein/features/nodes/geometry.py | add_sidechain_vector | zibaee-2021/graphein | 0 | python | def add_sidechain_vector(g: nx.Graph, scale: bool=True, reverse: bool=False):
'Adds vector from node to average position of sidechain atoms.\n\n We compute the mean of the sidechain atoms for each node. For this we use the ``rgroup_df`` dataframe.\n If the graph does not contain the ``rgroup_df`` dataframe, we compute it from the ``raw_pdb_df``.\n If scale, we scale the vector to the unit vector. If reverse is True,\n we reverse the vector (``sidechain - node``). If reverse is false (default) we compute (``node - sidechain``).\n\n :param g: Graph to add vector to.\n :type g: nx.Graph\n :param scale: Scale vector to unit vector. Defaults to ``True``.\n :type scale: bool\n :param reverse: Reverse vector. Defaults to ``False``.\n :type reverse: bool\n '
if ('rgroup_df' not in g.graph.keys()):
g.graph['rgroup_df'] = compute_rgroup_dataframe(g.graph['raw_pdb_df'])
sc_centroid = g.graph['rgroup_df'].groupby('node_id').mean()
for (n, d) in g.nodes(data=True):
if (d['residue_name'] == 'GLY'):
vec = np.array([0, 0, 0])
else:
if reverse:
vec = (d['coords'] - np.array(sc_centroid.loc[n][['x_coord', 'y_coord', 'z_coord']]))
else:
vec = (np.array(sc_centroid.loc[n][['x_coord', 'y_coord', 'z_coord']]) - d['coords'])
if scale:
vec = (vec / np.linalg.norm(vec))
d['sidechain_vector'] = vec | def add_sidechain_vector(g: nx.Graph, scale: bool=True, reverse: bool=False):
'Adds vector from node to average position of sidechain atoms.\n\n We compute the mean of the sidechain atoms for each node. For this we use the ``rgroup_df`` dataframe.\n If the graph does not contain the ``rgroup_df`` dataframe, we compute it from the ``raw_pdb_df``.\n If scale, we scale the vector to the unit vector. If reverse is True,\n we reverse the vector (``sidechain - node``). If reverse is false (default) we compute (``node - sidechain``).\n\n :param g: Graph to add vector to.\n :type g: nx.Graph\n :param scale: Scale vector to unit vector. Defaults to ``True``.\n :type scale: bool\n :param reverse: Reverse vector. Defaults to ``False``.\n :type reverse: bool\n '
if ('rgroup_df' not in g.graph.keys()):
g.graph['rgroup_df'] = compute_rgroup_dataframe(g.graph['raw_pdb_df'])
sc_centroid = g.graph['rgroup_df'].groupby('node_id').mean()
for (n, d) in g.nodes(data=True):
if (d['residue_name'] == 'GLY'):
vec = np.array([0, 0, 0])
else:
if reverse:
vec = (d['coords'] - np.array(sc_centroid.loc[n][['x_coord', 'y_coord', 'z_coord']]))
else:
vec = (np.array(sc_centroid.loc[n][['x_coord', 'y_coord', 'z_coord']]) - d['coords'])
if scale:
vec = (vec / np.linalg.norm(vec))
d['sidechain_vector'] = vec<|docstring|>Adds vector from node to average position of sidechain atoms.
We compute the mean of the sidechain atoms for each node. For this we use the ``rgroup_df`` dataframe.
If the graph does not contain the ``rgroup_df`` dataframe, we compute it from the ``raw_pdb_df``.
If scale, we scale the vector to the unit vector. If reverse is True,
we reverse the vector (``sidechain - node``). If reverse is false (default) we compute (``node - sidechain``).
:param g: Graph to add vector to.
:type g: nx.Graph
:param scale: Scale vector to unit vector. Defaults to ``True``.
:type scale: bool
:param reverse: Reverse vector. Defaults to ``False``.
:type reverse: bool<|endoftext|> |
3eca8ec7255e208fd8a0f0dd77346da69c87a3ee4032b2c2e5be0ec35972c697 | def add_beta_carbon_vector(g: nx.Graph, scale: bool=True, reverse: bool=False):
'Adds vector from node (typically alpha carbon) to position of beta carbon.\n\n Glycine does not have a beta carbon, so we set it to ``np.array([0, 0, 0])``.\n We extract the position of the beta carbon from the unprocessed atomic PDB dataframe.\n For this we use the ``raw_pdb_df`` dataframe.\n If scale, we scale the vector to the unit vector. If reverse is True,\n we reverse the vector (``C beta - node``). If reverse is false (default) we compute (``node - C beta``).\n\n :param g: Graph to add vector to.\n :type g: nx.Graph\n :param scale: Scale vector to unit vector. Defaults to ``True``.\n :type scale: bool\n :param reverse: Reverse vector. Defaults to ``False``.\n :type reverse: bool\n '
c_beta_coords = filter_dataframe(g.graph['raw_pdb_df'], 'atom_name', ['CB'], boolean=True)
c_beta_coords.index = c_beta_coords['node_id']
for (n, d) in g.nodes(data=True):
if (d['residue_name'] == 'GLY'):
vec = np.array([0, 0, 0])
else:
if reverse:
vec = (d['coords'] - np.array(c_beta_coords.loc[n][['x_coord', 'y_coord', 'z_coord']]))
else:
vec = (np.array(c_beta_coords.loc[n][['x_coord', 'y_coord', 'z_coord']]) - d['coords'])
if scale:
vec = (vec / np.linalg.norm(vec))
d['c_beta_vector'] = vec | Adds vector from node (typically alpha carbon) to position of beta carbon.
Glycine does not have a beta carbon, so we set it to ``np.array([0, 0, 0])``.
We extract the position of the beta carbon from the unprocessed atomic PDB dataframe.
For this we use the ``raw_pdb_df`` dataframe.
If scale, we scale the vector to the unit vector. If reverse is True,
we reverse the vector (``C beta - node``). If reverse is false (default) we compute (``node - C beta``).
:param g: Graph to add vector to.
:type g: nx.Graph
:param scale: Scale vector to unit vector. Defaults to ``True``.
:type scale: bool
:param reverse: Reverse vector. Defaults to ``False``.
:type reverse: bool | graphein/protein/features/nodes/geometry.py | add_beta_carbon_vector | zibaee-2021/graphein | 0 | python | def add_beta_carbon_vector(g: nx.Graph, scale: bool=True, reverse: bool=False):
'Adds vector from node (typically alpha carbon) to position of beta carbon.\n\n Glycine does not have a beta carbon, so we set it to ``np.array([0, 0, 0])``.\n We extract the position of the beta carbon from the unprocessed atomic PDB dataframe.\n For this we use the ``raw_pdb_df`` dataframe.\n If scale, we scale the vector to the unit vector. If reverse is True,\n we reverse the vector (``C beta - node``). If reverse is false (default) we compute (``node - C beta``).\n\n :param g: Graph to add vector to.\n :type g: nx.Graph\n :param scale: Scale vector to unit vector. Defaults to ``True``.\n :type scale: bool\n :param reverse: Reverse vector. Defaults to ``False``.\n :type reverse: bool\n '
c_beta_coords = filter_dataframe(g.graph['raw_pdb_df'], 'atom_name', ['CB'], boolean=True)
c_beta_coords.index = c_beta_coords['node_id']
for (n, d) in g.nodes(data=True):
if (d['residue_name'] == 'GLY'):
vec = np.array([0, 0, 0])
else:
if reverse:
vec = (d['coords'] - np.array(c_beta_coords.loc[n][['x_coord', 'y_coord', 'z_coord']]))
else:
vec = (np.array(c_beta_coords.loc[n][['x_coord', 'y_coord', 'z_coord']]) - d['coords'])
if scale:
vec = (vec / np.linalg.norm(vec))
d['c_beta_vector'] = vec | def add_beta_carbon_vector(g: nx.Graph, scale: bool=True, reverse: bool=False):
'Adds vector from node (typically alpha carbon) to position of beta carbon.\n\n Glycine does not have a beta carbon, so we set it to ``np.array([0, 0, 0])``.\n We extract the position of the beta carbon from the unprocessed atomic PDB dataframe.\n For this we use the ``raw_pdb_df`` dataframe.\n If scale, we scale the vector to the unit vector. If reverse is True,\n we reverse the vector (``C beta - node``). If reverse is false (default) we compute (``node - C beta``).\n\n :param g: Graph to add vector to.\n :type g: nx.Graph\n :param scale: Scale vector to unit vector. Defaults to ``True``.\n :type scale: bool\n :param reverse: Reverse vector. Defaults to ``False``.\n :type reverse: bool\n '
c_beta_coords = filter_dataframe(g.graph['raw_pdb_df'], 'atom_name', ['CB'], boolean=True)
c_beta_coords.index = c_beta_coords['node_id']
for (n, d) in g.nodes(data=True):
if (d['residue_name'] == 'GLY'):
vec = np.array([0, 0, 0])
else:
if reverse:
vec = (d['coords'] - np.array(c_beta_coords.loc[n][['x_coord', 'y_coord', 'z_coord']]))
else:
vec = (np.array(c_beta_coords.loc[n][['x_coord', 'y_coord', 'z_coord']]) - d['coords'])
if scale:
vec = (vec / np.linalg.norm(vec))
d['c_beta_vector'] = vec<|docstring|>Adds vector from node (typically alpha carbon) to position of beta carbon.
Glycine does not have a beta carbon, so we set it to ``np.array([0, 0, 0])``.
We extract the position of the beta carbon from the unprocessed atomic PDB dataframe.
For this we use the ``raw_pdb_df`` dataframe.
If scale, we scale the vector to the unit vector. If reverse is True,
we reverse the vector (``C beta - node``). If reverse is false (default) we compute (``node - C beta``).
:param g: Graph to add vector to.
:type g: nx.Graph
:param scale: Scale vector to unit vector. Defaults to ``True``.
:type scale: bool
:param reverse: Reverse vector. Defaults to ``False``.
:type reverse: bool<|endoftext|> |
39cea3616100a06e78591d8b91edfa30b95d6454d9104e30abfffb9e3e2ea1e3 | def add_sequence_neighbour_vector(g: nx.Graph, scale: bool=True, reverse: bool=False, n_to_c: bool=True):
'Computes vector from node to adjacent node in sequence.\n Typically used with ``CA`` (alpha carbon) graphs.\n\n If ``n_to_c`` is ``True`` (default), we compute the vectors from the N terminus to the C terminus (canonical direction).\n If ``reverse`` is ``False`` (default), we compute ``Node_i - Node_{i+1}``.\n If ``reverse is ``True``, we compute ``Node_{i+1} - Node_i``.\n :param g: Graph to add vector to.\n :type g: nx.Graph\n :param scale: Scale vector to unit vector. Defaults to ``True``.\n :type scale: bool\n :param reverse: Reverse vector. Defaults to ``False``.\n :type reverse: bool\n :param n_to_c: Compute vector from N to C or C to N. Defaults to ``True``.\n :type n_to_c: bool\n '
suffix = ('n_to_c' if n_to_c else 'c_to_n')
for chain_id in g.graph['chain_ids']:
chain_residues = [(n, v) for (n, v) in g.nodes(data=True) if (v['chain_id'] == chain_id)]
if (not n_to_c):
chain_residues.reverse()
for (i, residue) in enumerate(chain_residues):
if (i == (len(chain_residues) - 1)):
residue[1][f'sequence_neighbour_vector_{suffix}'] = np.array([0, 0, 0])
continue
cond_1 = (residue[1]['chain_id'] == chain_residues[(i + 1)][1]['chain_id'])
cond_2 = (abs((residue[1]['residue_number'] - chain_residues[(i + 1)][1]['residue_number'])) == 1)
if (cond_1 and cond_2):
vec = (chain_residues[(i + 1)][1]['coords'] - residue[1]['coords'])
if reverse:
vec = (- vec)
if scale:
vec = (vec / np.linalg.norm(vec))
residue[1][f'sequence_neighbour_vector_{suffix}'] = vec | Computes vector from node to adjacent node in sequence.
Typically used with ``CA`` (alpha carbon) graphs.
If ``n_to_c`` is ``True`` (default), we compute the vectors from the N terminus to the C terminus (canonical direction).
If ``reverse`` is ``False`` (default), we compute ``Node_i - Node_{i+1}``.
If ``reverse is ``True``, we compute ``Node_{i+1} - Node_i``.
:param g: Graph to add vector to.
:type g: nx.Graph
:param scale: Scale vector to unit vector. Defaults to ``True``.
:type scale: bool
:param reverse: Reverse vector. Defaults to ``False``.
:type reverse: bool
:param n_to_c: Compute vector from N to C or C to N. Defaults to ``True``.
:type n_to_c: bool | graphein/protein/features/nodes/geometry.py | add_sequence_neighbour_vector | zibaee-2021/graphein | 0 | python | def add_sequence_neighbour_vector(g: nx.Graph, scale: bool=True, reverse: bool=False, n_to_c: bool=True):
'Computes vector from node to adjacent node in sequence.\n Typically used with ``CA`` (alpha carbon) graphs.\n\n If ``n_to_c`` is ``True`` (default), we compute the vectors from the N terminus to the C terminus (canonical direction).\n If ``reverse`` is ``False`` (default), we compute ``Node_i - Node_{i+1}``.\n If ``reverse is ``True``, we compute ``Node_{i+1} - Node_i``.\n :param g: Graph to add vector to.\n :type g: nx.Graph\n :param scale: Scale vector to unit vector. Defaults to ``True``.\n :type scale: bool\n :param reverse: Reverse vector. Defaults to ``False``.\n :type reverse: bool\n :param n_to_c: Compute vector from N to C or C to N. Defaults to ``True``.\n :type n_to_c: bool\n '
suffix = ('n_to_c' if n_to_c else 'c_to_n')
for chain_id in g.graph['chain_ids']:
chain_residues = [(n, v) for (n, v) in g.nodes(data=True) if (v['chain_id'] == chain_id)]
if (not n_to_c):
chain_residues.reverse()
for (i, residue) in enumerate(chain_residues):
if (i == (len(chain_residues) - 1)):
residue[1][f'sequence_neighbour_vector_{suffix}'] = np.array([0, 0, 0])
continue
cond_1 = (residue[1]['chain_id'] == chain_residues[(i + 1)][1]['chain_id'])
cond_2 = (abs((residue[1]['residue_number'] - chain_residues[(i + 1)][1]['residue_number'])) == 1)
if (cond_1 and cond_2):
vec = (chain_residues[(i + 1)][1]['coords'] - residue[1]['coords'])
if reverse:
vec = (- vec)
if scale:
vec = (vec / np.linalg.norm(vec))
residue[1][f'sequence_neighbour_vector_{suffix}'] = vec | def add_sequence_neighbour_vector(g: nx.Graph, scale: bool=True, reverse: bool=False, n_to_c: bool=True):
'Computes vector from node to adjacent node in sequence.\n Typically used with ``CA`` (alpha carbon) graphs.\n\n If ``n_to_c`` is ``True`` (default), we compute the vectors from the N terminus to the C terminus (canonical direction).\n If ``reverse`` is ``False`` (default), we compute ``Node_i - Node_{i+1}``.\n If ``reverse is ``True``, we compute ``Node_{i+1} - Node_i``.\n :param g: Graph to add vector to.\n :type g: nx.Graph\n :param scale: Scale vector to unit vector. Defaults to ``True``.\n :type scale: bool\n :param reverse: Reverse vector. Defaults to ``False``.\n :type reverse: bool\n :param n_to_c: Compute vector from N to C or C to N. Defaults to ``True``.\n :type n_to_c: bool\n '
suffix = ('n_to_c' if n_to_c else 'c_to_n')
for chain_id in g.graph['chain_ids']:
chain_residues = [(n, v) for (n, v) in g.nodes(data=True) if (v['chain_id'] == chain_id)]
if (not n_to_c):
chain_residues.reverse()
for (i, residue) in enumerate(chain_residues):
if (i == (len(chain_residues) - 1)):
residue[1][f'sequence_neighbour_vector_{suffix}'] = np.array([0, 0, 0])
continue
cond_1 = (residue[1]['chain_id'] == chain_residues[(i + 1)][1]['chain_id'])
cond_2 = (abs((residue[1]['residue_number'] - chain_residues[(i + 1)][1]['residue_number'])) == 1)
if (cond_1 and cond_2):
vec = (chain_residues[(i + 1)][1]['coords'] - residue[1]['coords'])
if reverse:
vec = (- vec)
if scale:
vec = (vec / np.linalg.norm(vec))
residue[1][f'sequence_neighbour_vector_{suffix}'] = vec<|docstring|>Computes vector from node to adjacent node in sequence.
Typically used with ``CA`` (alpha carbon) graphs.
If ``n_to_c`` is ``True`` (default), we compute the vectors from the N terminus to the C terminus (canonical direction).
If ``reverse`` is ``False`` (default), we compute ``Node_i - Node_{i+1}``.
If ``reverse is ``True``, we compute ``Node_{i+1} - Node_i``.
:param g: Graph to add vector to.
:type g: nx.Graph
:param scale: Scale vector to unit vector. Defaults to ``True``.
:type scale: bool
:param reverse: Reverse vector. Defaults to ``False``.
:type reverse: bool
:param n_to_c: Compute vector from N to C or C to N. Defaults to ``True``.
:type n_to_c: bool<|endoftext|> |
7cb6cf306a67ac1e5b52649a8789653090700a91794e1ff3a29cf8ec9485c77f | def __init__(self, name: str, transformer: Callable=None, download_if_missing: bool=True, data_folder: str=None, **kwargs):
'\n An abstract class used to download the datasets.\n :param name: The name of the dataset.\n :param transformer: The transformer function used when a sample is retrieved.\n :param download_if_missing: If the dataset needs to be downloaded if missing.\n :param data_folder: Where the dataset is stored.\n '
if (data_folder is None):
data_folder = join(dirname(__file__), 'downloaded_datasets', name)
self.data_folder = data_folder
self._name = name
self.transformer = (transformer if (transformer is not None) else (lambda x: x))
missing = (not self._check_exists())
if missing:
if (not download_if_missing):
raise IOError('Data not found and `download_if_missing` is False')
else:
if (not exists(self.data_folder)):
makedirs(self.data_folder)
print('Downloading dataset {}'.format(self.name))
self.download_dataset() | An abstract class used to download the datasets.
:param name: The name of the dataset.
:param transformer: The transformer function used when a sample is retrieved.
:param download_if_missing: If the dataset needs to be downloaded if missing.
:param data_folder: Where the dataset is stored. | continual_learning/datasets/base/utils.py | __init__ | jaryP/ContinualAI | 0 | python | def __init__(self, name: str, transformer: Callable=None, download_if_missing: bool=True, data_folder: str=None, **kwargs):
'\n An abstract class used to download the datasets.\n :param name: The name of the dataset.\n :param transformer: The transformer function used when a sample is retrieved.\n :param download_if_missing: If the dataset needs to be downloaded if missing.\n :param data_folder: Where the dataset is stored.\n '
if (data_folder is None):
data_folder = join(dirname(__file__), 'downloaded_datasets', name)
self.data_folder = data_folder
self._name = name
self.transformer = (transformer if (transformer is not None) else (lambda x: x))
missing = (not self._check_exists())
if missing:
if (not download_if_missing):
raise IOError('Data not found and `download_if_missing` is False')
else:
if (not exists(self.data_folder)):
makedirs(self.data_folder)
print('Downloading dataset {}'.format(self.name))
self.download_dataset() | def __init__(self, name: str, transformer: Callable=None, download_if_missing: bool=True, data_folder: str=None, **kwargs):
'\n An abstract class used to download the datasets.\n :param name: The name of the dataset.\n :param transformer: The transformer function used when a sample is retrieved.\n :param download_if_missing: If the dataset needs to be downloaded if missing.\n :param data_folder: Where the dataset is stored.\n '
if (data_folder is None):
data_folder = join(dirname(__file__), 'downloaded_datasets', name)
self.data_folder = data_folder
self._name = name
self.transformer = (transformer if (transformer is not None) else (lambda x: x))
missing = (not self._check_exists())
if missing:
if (not download_if_missing):
raise IOError('Data not found and `download_if_missing` is False')
else:
if (not exists(self.data_folder)):
makedirs(self.data_folder)
print('Downloading dataset {}'.format(self.name))
self.download_dataset()<|docstring|>An abstract class used to download the datasets.
:param name: The name of the dataset.
:param transformer: The transformer function used when a sample is retrieved.
:param download_if_missing: If the dataset needs to be downloaded if missing.
:param data_folder: Where the dataset is stored.<|endoftext|> |
1460e6a744370e7785057017315f1d436cd3c8ac802d4b47a924a423683f4962 | def getNk(self, wavelength):
'\n The {wavelength, n, k} is linearly interpolated.\n '
nk = complex(1, 0)
if (wavelength <= 0.0):
return nk
if (wavelength >= self.nk[(- 1)][0]):
nk = self.nk[(- 1)][1]
elif (wavelength <= self.nk[0][0]):
nk = self.nk[0][1]
else:
i = 0
while ((i < len(self.nk)) and (wavelength > self.nk[i][0])):
i += 1
if (i == (len(self.nk) - 1)):
nk = self.nk[(- 1)][1]
else:
d1 = (wavelength - self.nk[i][0])
d2 = (self.nk[(i + 1)][0] - wavelength)
nk = (((self.nk[i][1] * d2) + (self.nk[(i + 1)][1] * d1)) / (d1 + d2))
if (self.gain != None):
imag = (((0.0 - self.gain) * (wavelength * 1e-07)) / (4.0 * math.pi))
return complex(nk.real, imag)
else:
return nk | The {wavelength, n, k} is linearly interpolated. | MultilayerOptics/Optics.py | getNk | KanglinXiong/multilayerOptics | 0 | python | def getNk(self, wavelength):
'\n \n '
nk = complex(1, 0)
if (wavelength <= 0.0):
return nk
if (wavelength >= self.nk[(- 1)][0]):
nk = self.nk[(- 1)][1]
elif (wavelength <= self.nk[0][0]):
nk = self.nk[0][1]
else:
i = 0
while ((i < len(self.nk)) and (wavelength > self.nk[i][0])):
i += 1
if (i == (len(self.nk) - 1)):
nk = self.nk[(- 1)][1]
else:
d1 = (wavelength - self.nk[i][0])
d2 = (self.nk[(i + 1)][0] - wavelength)
nk = (((self.nk[i][1] * d2) + (self.nk[(i + 1)][1] * d1)) / (d1 + d2))
if (self.gain != None):
imag = (((0.0 - self.gain) * (wavelength * 1e-07)) / (4.0 * math.pi))
return complex(nk.real, imag)
else:
return nk | def getNk(self, wavelength):
'\n \n '
nk = complex(1, 0)
if (wavelength <= 0.0):
return nk
if (wavelength >= self.nk[(- 1)][0]):
nk = self.nk[(- 1)][1]
elif (wavelength <= self.nk[0][0]):
nk = self.nk[0][1]
else:
i = 0
while ((i < len(self.nk)) and (wavelength > self.nk[i][0])):
i += 1
if (i == (len(self.nk) - 1)):
nk = self.nk[(- 1)][1]
else:
d1 = (wavelength - self.nk[i][0])
d2 = (self.nk[(i + 1)][0] - wavelength)
nk = (((self.nk[i][1] * d2) + (self.nk[(i + 1)][1] * d1)) / (d1 + d2))
if (self.gain != None):
imag = (((0.0 - self.gain) * (wavelength * 1e-07)) / (4.0 * math.pi))
return complex(nk.real, imag)
else:
return nk<|docstring|>The {wavelength, n, k} is linearly interpolated.<|endoftext|> |
54c2861ce00220303aadf87a188032211c0e15d1b3f68d22f9ad063c1c907a7b | def setNk(self, filename):
'\n The index file is in the format of {wavelength, n, k} with increasing wavelength. \n '
if (not os.path.exists(Misc.getNkfilename(filename))):
self.nk = Misc.getTernaryAlloyNk(filename.replace('.txt', '').strip())
return
self.nk = []
fileObj = open(Misc.getNkfilename(filename), mode='rt', newline='\n')
fileObj.readline()
for line in fileObj:
tmp = line.replace('\t', ' ').replace(',', ' ').split(' ')
self.nk.append([float(tmp[0]), complex(float(tmp[1]), float(tmp[2]))]) | The index file is in the format of {wavelength, n, k} with increasing wavelength. | MultilayerOptics/Optics.py | setNk | KanglinXiong/multilayerOptics | 0 | python | def setNk(self, filename):
'\n \n '
if (not os.path.exists(Misc.getNkfilename(filename))):
self.nk = Misc.getTernaryAlloyNk(filename.replace('.txt', ).strip())
return
self.nk = []
fileObj = open(Misc.getNkfilename(filename), mode='rt', newline='\n')
fileObj.readline()
for line in fileObj:
tmp = line.replace('\t', ' ').replace(',', ' ').split(' ')
self.nk.append([float(tmp[0]), complex(float(tmp[1]), float(tmp[2]))]) | def setNk(self, filename):
'\n \n '
if (not os.path.exists(Misc.getNkfilename(filename))):
self.nk = Misc.getTernaryAlloyNk(filename.replace('.txt', ).strip())
return
self.nk = []
fileObj = open(Misc.getNkfilename(filename), mode='rt', newline='\n')
fileObj.readline()
for line in fileObj:
tmp = line.replace('\t', ' ').replace(',', ' ').split(' ')
self.nk.append([float(tmp[0]), complex(float(tmp[1]), float(tmp[2]))])<|docstring|>The index file is in the format of {wavelength, n, k} with increasing wavelength.<|endoftext|> |
e1d582f4e607e303160af07c12e847572e7f0ac353d603c25ce8df06ba8dcde4 | def setGain(self, g):
'\n set gain to g (1/cm)\n '
self.gain = g | set gain to g (1/cm) | MultilayerOptics/Optics.py | setGain | KanglinXiong/multilayerOptics | 0 | python | def setGain(self, g):
'\n \n '
self.gain = g | def setGain(self, g):
'\n \n '
self.gain = g<|docstring|>set gain to g (1/cm)<|endoftext|> |
42c307e81e3e395920d17a3f9bb7ea520f29f96676b1e328d7ac34f867f4361e | def setAbsorption(self, a):
'\n set gain to -a (1/cm)\n '
self.gain = (0.0 - a) | set gain to -a (1/cm) | MultilayerOptics/Optics.py | setAbsorption | KanglinXiong/multilayerOptics | 0 | python | def setAbsorption(self, a):
'\n \n '
self.gain = (0.0 - a) | def setAbsorption(self, a):
'\n \n '
self.gain = (0.0 - a)<|docstring|>set gain to -a (1/cm)<|endoftext|> |
062b30168ae341d9439157b59216e7ab326893ac20de8244132af95457404059 | def getElec(self, displace=0):
'\n n affects phase, k affects amplitude.\n Displacement is -thickness/2 for left, 0 for center, thickness/2 for right.\n '
nk = self.material.getNk(self.wavelength)
waveVec = (((2 * math.pi) * nk) / self.wavelength)
complexPhase = ((waveVec * (displace / math.cos(self.angle))) * complex(0, 1))
El = copy.deepcopy(self.El)
Er = copy.deepcopy(self.Er)
El[0] = (El[0] / cmath.exp(complexPhase))
Er[0] = (Er[0] * cmath.exp(complexPhase))
El[1] = (El[1] - complexPhase.imag)
Er[1] = (Er[1] + complexPhase.imag)
return (El, Er) | n affects phase, k affects amplitude.
Displacement is -thickness/2 for left, 0 for center, thickness/2 for right. | MultilayerOptics/Optics.py | getElec | KanglinXiong/multilayerOptics | 0 | python | def getElec(self, displace=0):
'\n n affects phase, k affects amplitude.\n Displacement is -thickness/2 for left, 0 for center, thickness/2 for right.\n '
nk = self.material.getNk(self.wavelength)
waveVec = (((2 * math.pi) * nk) / self.wavelength)
complexPhase = ((waveVec * (displace / math.cos(self.angle))) * complex(0, 1))
El = copy.deepcopy(self.El)
Er = copy.deepcopy(self.Er)
El[0] = (El[0] / cmath.exp(complexPhase))
Er[0] = (Er[0] * cmath.exp(complexPhase))
El[1] = (El[1] - complexPhase.imag)
Er[1] = (Er[1] + complexPhase.imag)
return (El, Er) | def getElec(self, displace=0):
'\n n affects phase, k affects amplitude.\n Displacement is -thickness/2 for left, 0 for center, thickness/2 for right.\n '
nk = self.material.getNk(self.wavelength)
waveVec = (((2 * math.pi) * nk) / self.wavelength)
complexPhase = ((waveVec * (displace / math.cos(self.angle))) * complex(0, 1))
El = copy.deepcopy(self.El)
Er = copy.deepcopy(self.Er)
El[0] = (El[0] / cmath.exp(complexPhase))
Er[0] = (Er[0] * cmath.exp(complexPhase))
El[1] = (El[1] - complexPhase.imag)
Er[1] = (Er[1] + complexPhase.imag)
return (El, Er)<|docstring|>n affects phase, k affects amplitude.
Displacement is -thickness/2 for left, 0 for center, thickness/2 for right.<|endoftext|> |
eb240647bf6bf31cc74099ce9498101292b9ccbf9f8fae8f2dd2e1231ae6deb0 | def setElec(self, El, Er, displace=0):
'\n Set El and Er at the layer center by values at given displacement.\n '
nk = self.material.getNk(self.wavelength)
waveVec = (((2 * math.pi) * nk) / self.wavelength)
complexPhase = ((waveVec * ((0.0 - displace) / math.cos(self.angle))) * complex(0, 1))
self.El[0] = (El[0] / cmath.exp(complexPhase))
self.Er[0] = (Er[0] * cmath.exp(complexPhase))
self.El[1] = (El[1] - complexPhase.imag)
self.Er[1] = (Er[1] + complexPhase.imag) | Set El and Er at the layer center by values at given displacement. | MultilayerOptics/Optics.py | setElec | KanglinXiong/multilayerOptics | 0 | python | def setElec(self, El, Er, displace=0):
'\n \n '
nk = self.material.getNk(self.wavelength)
waveVec = (((2 * math.pi) * nk) / self.wavelength)
complexPhase = ((waveVec * ((0.0 - displace) / math.cos(self.angle))) * complex(0, 1))
self.El[0] = (El[0] / cmath.exp(complexPhase))
self.Er[0] = (Er[0] * cmath.exp(complexPhase))
self.El[1] = (El[1] - complexPhase.imag)
self.Er[1] = (Er[1] + complexPhase.imag) | def setElec(self, El, Er, displace=0):
'\n \n '
nk = self.material.getNk(self.wavelength)
waveVec = (((2 * math.pi) * nk) / self.wavelength)
complexPhase = ((waveVec * ((0.0 - displace) / math.cos(self.angle))) * complex(0, 1))
self.El[0] = (El[0] / cmath.exp(complexPhase))
self.Er[0] = (Er[0] * cmath.exp(complexPhase))
self.El[1] = (El[1] - complexPhase.imag)
self.Er[1] = (Er[1] + complexPhase.imag)<|docstring|>Set El and Er at the layer center by values at given displacement.<|endoftext|> |
f5c55947cdfe14229dea93f4826ca92d285f8b492e69634943d14af76cabad6f | def setWavelength(self, wavelength=520):
'\n Set wavelength of each OptLayer instance.\n '
self.wavelength = wavelength
for i in range(len(self.struct)):
self.struct[i].wavelength = self.wavelength | Set wavelength of each OptLayer instance. | MultilayerOptics/Optics.py | setWavelength | KanglinXiong/multilayerOptics | 0 | python | def setWavelength(self, wavelength=520):
'\n \n '
self.wavelength = wavelength
for i in range(len(self.struct)):
self.struct[i].wavelength = self.wavelength | def setWavelength(self, wavelength=520):
'\n \n '
self.wavelength = wavelength
for i in range(len(self.struct)):
self.struct[i].wavelength = self.wavelength<|docstring|>Set wavelength of each OptLayer instance.<|endoftext|> |
2176567dab7853d3e92b19a16e66257eb0ac9cbd1a85c09997eabedfe325bf27 | def setStruct(self):
'\n The self.struct is a list of OptLayer instances, [optLayer1, optLayer2, ...].\n '
tmpMatList = []
for lay in self.stack:
if (not (lay[0] in tmpMatList)):
tmpMatList.append(lay[0])
tmpMatDict = []
for mat in tmpMatList:
tmpMatDict.append([mat, OptMaterial(mat)])
self.matDict = dict(tmpMatDict)
self.struct = []
for lay in self.stack:
self.struct.append(OptLayer(self.matDict[lay[0]]))
self.struct[(- 1)].thickness = lay[1] | The self.struct is a list of OptLayer instances, [optLayer1, optLayer2, ...]. | MultilayerOptics/Optics.py | setStruct | KanglinXiong/multilayerOptics | 0 | python | def setStruct(self):
'\n \n '
tmpMatList = []
for lay in self.stack:
if (not (lay[0] in tmpMatList)):
tmpMatList.append(lay[0])
tmpMatDict = []
for mat in tmpMatList:
tmpMatDict.append([mat, OptMaterial(mat)])
self.matDict = dict(tmpMatDict)
self.struct = []
for lay in self.stack:
self.struct.append(OptLayer(self.matDict[lay[0]]))
self.struct[(- 1)].thickness = lay[1] | def setStruct(self):
'\n \n '
tmpMatList = []
for lay in self.stack:
if (not (lay[0] in tmpMatList)):
tmpMatList.append(lay[0])
tmpMatDict = []
for mat in tmpMatList:
tmpMatDict.append([mat, OptMaterial(mat)])
self.matDict = dict(tmpMatDict)
self.struct = []
for lay in self.stack:
self.struct.append(OptLayer(self.matDict[lay[0]]))
self.struct[(- 1)].thickness = lay[1]<|docstring|>The self.struct is a list of OptLayer instances, [optLayer1, optLayer2, ...].<|endoftext|> |
fc586c5c268600c50563dfc3f5da3762bed66ba59fa11946a170da766f6c9f0d | def setStack(self, filename):
'\n Parse the structure file to set the self.stack.\n The self.stack is a list of material and thickness, [[mat1, d1], [mat2, d2], ...].\n '
fileObj = open(Misc.getScriptfilename(filename), mode='rt', newline='\n')
for line in fileObj:
line = line.strip()
if ((not len(line)) or ('#' in line)):
continue
line = line.expandtabs(1)
while ((' ' * 2) in line):
line = line.replace((' ' * 2), ' ')
layers = line.partition('{')[2].partition('}')[0]
repeat = line.partition('*')[2].strip()
gradedLayerList = re.findall('\\[.*?\\]', layers)
for gradedLayer in gradedLayerList:
layers = layers.replace(gradedLayer, Misc.discretizeGradedLayer(gradedLayer))
tmpStack = []
if len(repeat):
repeat = int(repeat)
else:
repeat = 1
for lay in layers.split(','):
lay = lay.strip().split(' ')
lay[1] = float(lay[1])
tmpStack.append(lay)
for lay in (tmpStack * repeat):
self.stack.append(lay) | Parse the structure file to set the self.stack.
The self.stack is a list of material and thickness, [[mat1, d1], [mat2, d2], ...]. | MultilayerOptics/Optics.py | setStack | KanglinXiong/multilayerOptics | 0 | python | def setStack(self, filename):
'\n Parse the structure file to set the self.stack.\n The self.stack is a list of material and thickness, [[mat1, d1], [mat2, d2], ...].\n '
fileObj = open(Misc.getScriptfilename(filename), mode='rt', newline='\n')
for line in fileObj:
line = line.strip()
if ((not len(line)) or ('#' in line)):
continue
line = line.expandtabs(1)
while ((' ' * 2) in line):
line = line.replace((' ' * 2), ' ')
layers = line.partition('{')[2].partition('}')[0]
repeat = line.partition('*')[2].strip()
gradedLayerList = re.findall('\\[.*?\\]', layers)
for gradedLayer in gradedLayerList:
layers = layers.replace(gradedLayer, Misc.discretizeGradedLayer(gradedLayer))
tmpStack = []
if len(repeat):
repeat = int(repeat)
else:
repeat = 1
for lay in layers.split(','):
lay = lay.strip().split(' ')
lay[1] = float(lay[1])
tmpStack.append(lay)
for lay in (tmpStack * repeat):
self.stack.append(lay) | def setStack(self, filename):
'\n Parse the structure file to set the self.stack.\n The self.stack is a list of material and thickness, [[mat1, d1], [mat2, d2], ...].\n '
fileObj = open(Misc.getScriptfilename(filename), mode='rt', newline='\n')
for line in fileObj:
line = line.strip()
if ((not len(line)) or ('#' in line)):
continue
line = line.expandtabs(1)
while ((' ' * 2) in line):
line = line.replace((' ' * 2), ' ')
layers = line.partition('{')[2].partition('}')[0]
repeat = line.partition('*')[2].strip()
gradedLayerList = re.findall('\\[.*?\\]', layers)
for gradedLayer in gradedLayerList:
layers = layers.replace(gradedLayer, Misc.discretizeGradedLayer(gradedLayer))
tmpStack = []
if len(repeat):
repeat = int(repeat)
else:
repeat = 1
for lay in layers.split(','):
lay = lay.strip().split(' ')
lay[1] = float(lay[1])
tmpStack.append(lay)
for lay in (tmpStack * repeat):
self.stack.append(lay)<|docstring|>Parse the structure file to set the self.stack.
The self.stack is a list of material and thickness, [[mat1, d1], [mat2, d2], ...].<|endoftext|> |
85ea8b048fa32da27d94c6294f833bc54024b94e1550998d37abc1c5da27894e | def getOpticalLength(self, wavelength=520):
' sum of n*d of all OptLayer '
self.setWavelength(wavelength)
optLen = 0.0
for i in range(len(self.struct)):
optLen += (self.struct[i].getNk() * self.struct[i].getThickness())
return optLen.real | sum of n*d of all OptLayer | MultilayerOptics/Optics.py | getOpticalLength | KanglinXiong/multilayerOptics | 0 | python | def getOpticalLength(self, wavelength=520):
' '
self.setWavelength(wavelength)
optLen = 0.0
for i in range(len(self.struct)):
optLen += (self.struct[i].getNk() * self.struct[i].getThickness())
return optLen.real | def getOpticalLength(self, wavelength=520):
' '
self.setWavelength(wavelength)
optLen = 0.0
for i in range(len(self.struct)):
optLen += (self.struct[i].getNk() * self.struct[i].getThickness())
return optLen.real<|docstring|>sum of n*d of all OptLayer<|endoftext|> |
ee7c2b7902d426e93d75784597ecf4ee25eb427e230f25f2811991880498e706 | def getInterfaceMatrix(self, nk1, theta1, nk2, theta2):
"\n Get transfer matrix for interface, by the boundary requirements of Maxwell's equations.\n Label 1 for input side, 2 for output side.\n The angle theta1 and theta2 should be calculated somewhere else by Snell's law.\n "
if (self.polarization == self.S):
m1122 = ((cmath.cos(theta2) * nk2) - (cmath.cos(theta1) * nk1))
m1221 = ((cmath.cos(theta2) * nk2) + (cmath.cos(theta1) * nk1))
m0 = ((2.0 * cmath.cos(theta2)) * nk2)
return [[(m1122 / m0), (m1221 / m0)], [(m1221 / m0), (m1122 / m0)]]
else:
m1122 = ((cmath.cos(theta2) * nk1) - (cmath.cos(theta1) * nk2))
m1221 = ((cmath.cos(theta2) * nk1) + (cmath.cos(theta1) * nk2))
m0 = ((2.0 * cmath.cos(theta2)) * nk2)
return [[(m1122 / m0), (m1221 / m0)], [(m1221 / m0), (m1122 / m0)]] | Get transfer matrix for interface, by the boundary requirements of Maxwell's equations.
Label 1 for input side, 2 for output side.
The angle theta1 and theta2 should be calculated somewhere else by Snell's law. | MultilayerOptics/Optics.py | getInterfaceMatrix | KanglinXiong/multilayerOptics | 0 | python | def getInterfaceMatrix(self, nk1, theta1, nk2, theta2):
"\n Get transfer matrix for interface, by the boundary requirements of Maxwell's equations.\n Label 1 for input side, 2 for output side.\n The angle theta1 and theta2 should be calculated somewhere else by Snell's law.\n "
if (self.polarization == self.S):
m1122 = ((cmath.cos(theta2) * nk2) - (cmath.cos(theta1) * nk1))
m1221 = ((cmath.cos(theta2) * nk2) + (cmath.cos(theta1) * nk1))
m0 = ((2.0 * cmath.cos(theta2)) * nk2)
return [[(m1122 / m0), (m1221 / m0)], [(m1221 / m0), (m1122 / m0)]]
else:
m1122 = ((cmath.cos(theta2) * nk1) - (cmath.cos(theta1) * nk2))
m1221 = ((cmath.cos(theta2) * nk1) + (cmath.cos(theta1) * nk2))
m0 = ((2.0 * cmath.cos(theta2)) * nk2)
return [[(m1122 / m0), (m1221 / m0)], [(m1221 / m0), (m1122 / m0)]] | def getInterfaceMatrix(self, nk1, theta1, nk2, theta2):
"\n Get transfer matrix for interface, by the boundary requirements of Maxwell's equations.\n Label 1 for input side, 2 for output side.\n The angle theta1 and theta2 should be calculated somewhere else by Snell's law.\n "
if (self.polarization == self.S):
m1122 = ((cmath.cos(theta2) * nk2) - (cmath.cos(theta1) * nk1))
m1221 = ((cmath.cos(theta2) * nk2) + (cmath.cos(theta1) * nk1))
m0 = ((2.0 * cmath.cos(theta2)) * nk2)
return [[(m1122 / m0), (m1221 / m0)], [(m1221 / m0), (m1122 / m0)]]
else:
m1122 = ((cmath.cos(theta2) * nk1) - (cmath.cos(theta1) * nk2))
m1221 = ((cmath.cos(theta2) * nk1) + (cmath.cos(theta1) * nk2))
m0 = ((2.0 * cmath.cos(theta2)) * nk2)
return [[(m1122 / m0), (m1221 / m0)], [(m1221 / m0), (m1122 / m0)]]<|docstring|>Get transfer matrix for interface, by the boundary requirements of Maxwell's equations.
Label 1 for input side, 2 for output side.
The angle theta1 and theta2 should be calculated somewhere else by Snell's law.<|endoftext|> |
3b54b0d100dbfff59722a11e977497db32c53c8ee4a9371cae81d3ec3413ba02 | def calcElecOfRightLayByLeftLay(self, leftLay, rightLay):
'\n The phase is calculated in the accumulated way.\n '
m = self.getInterfaceMatrix(leftLay.getNk(), leftLay.getAngle(), rightLay.getNk(), rightLay.getAngle())
[inEl, inEr] = leftLay.getRightSideElec()
[inWl, inWr] = [inEl[0], inEr[0]]
[outWl, outWr] = [((m[0][0] * inWr) + (m[0][1] * inWl)), ((m[1][0] * inWr) + (m[1][1] * inWl))]
[outEl, outEr] = [[outWl, cmath.phase(outWl)], [outWr, cmath.phase(outWr)]]
while ((outEl[1] > inEl[1]) and (inEl[0] != 0.0)):
outEl[1] -= (2.0 * math.pi)
while ((outEr[1] < inEr[1]) and (inEr[0] != 0.0)):
outEr[1] += (2.0 * math.pi)
rightLay.setElec(outEl, outEr, (0.0 - (rightLay.thickness / 2.0))) | The phase is calculated in the accumulated way. | MultilayerOptics/Optics.py | calcElecOfRightLayByLeftLay | KanglinXiong/multilayerOptics | 0 | python | def calcElecOfRightLayByLeftLay(self, leftLay, rightLay):
'\n \n '
m = self.getInterfaceMatrix(leftLay.getNk(), leftLay.getAngle(), rightLay.getNk(), rightLay.getAngle())
[inEl, inEr] = leftLay.getRightSideElec()
[inWl, inWr] = [inEl[0], inEr[0]]
[outWl, outWr] = [((m[0][0] * inWr) + (m[0][1] * inWl)), ((m[1][0] * inWr) + (m[1][1] * inWl))]
[outEl, outEr] = [[outWl, cmath.phase(outWl)], [outWr, cmath.phase(outWr)]]
while ((outEl[1] > inEl[1]) and (inEl[0] != 0.0)):
outEl[1] -= (2.0 * math.pi)
while ((outEr[1] < inEr[1]) and (inEr[0] != 0.0)):
outEr[1] += (2.0 * math.pi)
rightLay.setElec(outEl, outEr, (0.0 - (rightLay.thickness / 2.0))) | def calcElecOfRightLayByLeftLay(self, leftLay, rightLay):
'\n \n '
m = self.getInterfaceMatrix(leftLay.getNk(), leftLay.getAngle(), rightLay.getNk(), rightLay.getAngle())
[inEl, inEr] = leftLay.getRightSideElec()
[inWl, inWr] = [inEl[0], inEr[0]]
[outWl, outWr] = [((m[0][0] * inWr) + (m[0][1] * inWl)), ((m[1][0] * inWr) + (m[1][1] * inWl))]
[outEl, outEr] = [[outWl, cmath.phase(outWl)], [outWr, cmath.phase(outWr)]]
while ((outEl[1] > inEl[1]) and (inEl[0] != 0.0)):
outEl[1] -= (2.0 * math.pi)
while ((outEr[1] < inEr[1]) and (inEr[0] != 0.0)):
outEr[1] += (2.0 * math.pi)
rightLay.setElec(outEl, outEr, (0.0 - (rightLay.thickness / 2.0)))<|docstring|>The phase is calculated in the accumulated way.<|endoftext|> |
6c147a0691cbdad67428689846e9f76c08f3abaefddb0dce61b537185bce4d21 | def getLeftIncidentByRightExit(self, wavelength=520):
'\n Given the right most, compute the left most.\n '
self.setWavelength(wavelength)
self.struct[(- 1)].setElec([0, 0], [1.0, 0])
i = (- 1)
while (i > (0 - len(self.struct))):
self.calcElecOfLeftLayByRightLay(self.struct[i], self.struct[(i - 1)])
i -= 1
[ref, inc] = self.struct[0].getRightSideElec()
return [inc, ref] | Given the right most, compute the left most. | MultilayerOptics/Optics.py | getLeftIncidentByRightExit | KanglinXiong/multilayerOptics | 0 | python | def getLeftIncidentByRightExit(self, wavelength=520):
'\n \n '
self.setWavelength(wavelength)
self.struct[(- 1)].setElec([0, 0], [1.0, 0])
i = (- 1)
while (i > (0 - len(self.struct))):
self.calcElecOfLeftLayByRightLay(self.struct[i], self.struct[(i - 1)])
i -= 1
[ref, inc] = self.struct[0].getRightSideElec()
return [inc, ref] | def getLeftIncidentByRightExit(self, wavelength=520):
'\n \n '
self.setWavelength(wavelength)
self.struct[(- 1)].setElec([0, 0], [1.0, 0])
i = (- 1)
while (i > (0 - len(self.struct))):
self.calcElecOfLeftLayByRightLay(self.struct[i], self.struct[(i - 1)])
i -= 1
[ref, inc] = self.struct[0].getRightSideElec()
return [inc, ref]<|docstring|>Given the right most, compute the left most.<|endoftext|> |
61c4f47e24f53f583b3f3a127f5da184409cc24443df2c6ee625c5454accf7c0 | def getRightIncidentByLeftExit(self, wavelength=520):
'\n Given the left most, compute the right most.\n '
self.setWavelength(wavelength)
self.struct[0].setElec([1.0, 0], [0, 0])
i = 0
while (i < (len(self.struct) - 1)):
self.calcElecOfRightLayByLeftLay(self.struct[i], self.struct[(i + 1)])
i += 1
[inc, ref] = self.struct[(- 1)].getLeftSideElec()
return [inc, ref] | Given the left most, compute the right most. | MultilayerOptics/Optics.py | getRightIncidentByLeftExit | KanglinXiong/multilayerOptics | 0 | python | def getRightIncidentByLeftExit(self, wavelength=520):
'\n \n '
self.setWavelength(wavelength)
self.struct[0].setElec([1.0, 0], [0, 0])
i = 0
while (i < (len(self.struct) - 1)):
self.calcElecOfRightLayByLeftLay(self.struct[i], self.struct[(i + 1)])
i += 1
[inc, ref] = self.struct[(- 1)].getLeftSideElec()
return [inc, ref] | def getRightIncidentByLeftExit(self, wavelength=520):
'\n \n '
self.setWavelength(wavelength)
self.struct[0].setElec([1.0, 0], [0, 0])
i = 0
while (i < (len(self.struct) - 1)):
self.calcElecOfRightLayByLeftLay(self.struct[i], self.struct[(i + 1)])
i += 1
[inc, ref] = self.struct[(- 1)].getLeftSideElec()
return [inc, ref]<|docstring|>Given the left most, compute the right most.<|endoftext|> |
d7409eddd508b7717e555c4ea9e9e1d5982d70b7692caf72601871e290a92d4e | def getReflectOfLeftSurface(self, wavelength=520):
'\n Get the reflectivity of the left surface of the structure.\n '
[inc, ref] = self.getLeftIncidentByRightExit(wavelength)
if (abs(inc[0]) != 0.0):
return ((abs((ref[0] / inc[0])) ** 2.0), (ref[1] - inc[1]))
else:
return 'Error!' | Get the reflectivity of the left surface of the structure. | MultilayerOptics/Optics.py | getReflectOfLeftSurface | KanglinXiong/multilayerOptics | 0 | python | def getReflectOfLeftSurface(self, wavelength=520):
'\n \n '
[inc, ref] = self.getLeftIncidentByRightExit(wavelength)
if (abs(inc[0]) != 0.0):
return ((abs((ref[0] / inc[0])) ** 2.0), (ref[1] - inc[1]))
else:
return 'Error!' | def getReflectOfLeftSurface(self, wavelength=520):
'\n \n '
[inc, ref] = self.getLeftIncidentByRightExit(wavelength)
if (abs(inc[0]) != 0.0):
return ((abs((ref[0] / inc[0])) ** 2.0), (ref[1] - inc[1]))
else:
return 'Error!'<|docstring|>Get the reflectivity of the left surface of the structure.<|endoftext|> |
e2f515eb834da25371a823a8d83fb1722263477a6acea25a6addec68b1bf1821 | def getReflectOfRightSurface(self, wavelength=520):
'\n Get the reflectivity of the right surface of the structure.\n '
[inc, ref] = self.getRightIncidentByLeftExit(wavelength)
if (abs(inc[0]) != 0.0):
return ((abs((ref[0] / inc[0])) ** 2.0), (ref[1] - inc[1]))
else:
return 'Error!' | Get the reflectivity of the right surface of the structure. | MultilayerOptics/Optics.py | getReflectOfRightSurface | KanglinXiong/multilayerOptics | 0 | python | def getReflectOfRightSurface(self, wavelength=520):
'\n \n '
[inc, ref] = self.getRightIncidentByLeftExit(wavelength)
if (abs(inc[0]) != 0.0):
return ((abs((ref[0] / inc[0])) ** 2.0), (ref[1] - inc[1]))
else:
return 'Error!' | def getReflectOfRightSurface(self, wavelength=520):
'\n \n '
[inc, ref] = self.getRightIncidentByLeftExit(wavelength)
if (abs(inc[0]) != 0.0):
return ((abs((ref[0] / inc[0])) ** 2.0), (ref[1] - inc[1]))
else:
return 'Error!'<|docstring|>Get the reflectivity of the right surface of the structure.<|endoftext|> |
b62c173e85eb3326edc67cfa213c929916ef65a3bab6e5f117a6d71514a74a04 | def getReflectSpectrum(self, w1=500, w2=600, dw=1, surface='R'):
'\n Get reflectivity over a given wavelength range.\n '
reflectivity = []
w = w1
if (surface == 'L'):
while (w <= w2):
reflectivity.append([w, self.getReflectOfLeftSurface(w)[0]])
w = (w + dw)
else:
while (w <= w2):
reflectivity.append([w, self.getReflectOfRightSurface(w)[0]])
w = (w + dw)
return reflectivity | Get reflectivity over a given wavelength range. | MultilayerOptics/Optics.py | getReflectSpectrum | KanglinXiong/multilayerOptics | 0 | python | def getReflectSpectrum(self, w1=500, w2=600, dw=1, surface='R'):
'\n \n '
reflectivity = []
w = w1
if (surface == 'L'):
while (w <= w2):
reflectivity.append([w, self.getReflectOfLeftSurface(w)[0]])
w = (w + dw)
else:
while (w <= w2):
reflectivity.append([w, self.getReflectOfRightSurface(w)[0]])
w = (w + dw)
return reflectivity | def getReflectSpectrum(self, w1=500, w2=600, dw=1, surface='R'):
'\n \n '
reflectivity = []
w = w1
if (surface == 'L'):
while (w <= w2):
reflectivity.append([w, self.getReflectOfLeftSurface(w)[0]])
w = (w + dw)
else:
while (w <= w2):
reflectivity.append([w, self.getReflectOfRightSurface(w)[0]])
w = (w + dw)
return reflectivity<|docstring|>Get reflectivity over a given wavelength range.<|endoftext|> |
ac9c60f6b1648934ba52c1c8e7bfbbfae081319ce399e841660f3f13bb0a3fc5 | def getFieldIntensity(self, w=520, loc=0):
'\n Get the electrical field intensity within the OptStructure by a location. \n If to get the energy intensity, use epsr ~ n**2, I ~ eps0*epsr*Elec**2.\n '
x = 0.0
layer = None
i = 0
while (i < len(self.struct)):
layer = self.struct[i]
if (loc <= 0.0):
x = (0.0 - (layer.thickness / 2.0))
break
elif ((x <= loc) and ((x + layer.thickness) > loc)):
x = (loc - (x + (layer.thickness / 2.0)))
break
x += layer.thickness
i += 1
if ((i == len(self.struct)) and (x < loc)):
x = (layer.thickness / 2.0)
[El, Er] = layer.getElec(x)
return (abs((El[0] + Er[0])) ** 2) | Get the electrical field intensity within the OptStructure by a location.
If to get the energy intensity, use epsr ~ n**2, I ~ eps0*epsr*Elec**2. | MultilayerOptics/Optics.py | getFieldIntensity | KanglinXiong/multilayerOptics | 0 | python | def getFieldIntensity(self, w=520, loc=0):
'\n Get the electrical field intensity within the OptStructure by a location. \n If to get the energy intensity, use epsr ~ n**2, I ~ eps0*epsr*Elec**2.\n '
x = 0.0
layer = None
i = 0
while (i < len(self.struct)):
layer = self.struct[i]
if (loc <= 0.0):
x = (0.0 - (layer.thickness / 2.0))
break
elif ((x <= loc) and ((x + layer.thickness) > loc)):
x = (loc - (x + (layer.thickness / 2.0)))
break
x += layer.thickness
i += 1
if ((i == len(self.struct)) and (x < loc)):
x = (layer.thickness / 2.0)
[El, Er] = layer.getElec(x)
return (abs((El[0] + Er[0])) ** 2) | def getFieldIntensity(self, w=520, loc=0):
'\n Get the electrical field intensity within the OptStructure by a location. \n If to get the energy intensity, use epsr ~ n**2, I ~ eps0*epsr*Elec**2.\n '
x = 0.0
layer = None
i = 0
while (i < len(self.struct)):
layer = self.struct[i]
if (loc <= 0.0):
x = (0.0 - (layer.thickness / 2.0))
break
elif ((x <= loc) and ((x + layer.thickness) > loc)):
x = (loc - (x + (layer.thickness / 2.0)))
break
x += layer.thickness
i += 1
if ((i == len(self.struct)) and (x < loc)):
x = (layer.thickness / 2.0)
[El, Er] = layer.getElec(x)
return (abs((El[0] + Er[0])) ** 2)<|docstring|>Get the electrical field intensity within the OptStructure by a location.
If to get the energy intensity, use epsr ~ n**2, I ~ eps0*epsr*Elec**2.<|endoftext|> |
030f10f9ab4d6c1f1e58b323141c7eade58fdb75ca6cc4ae62b8c8f46e890390 | def getFieldDistribution(self, w=520, x1=(- 100000.0), x2=100000.0, dx=1, surface='R'):
'\n Build the field and return the distribution [[x, intensity], ...].\n '
self.getReflectSpectrum(w, w, 10.0, surface)
xMin = max(0.0, x1)
xMax = 0.0
for layer in self.struct:
xMax += layer.thickness
xMax = min(xMax, x2)
field = []
x = xMin
while (x <= xMax):
field.append([x, self.getFieldIntensity(w, x)])
x += dx
return field | Build the field and return the distribution [[x, intensity], ...]. | MultilayerOptics/Optics.py | getFieldDistribution | KanglinXiong/multilayerOptics | 0 | python | def getFieldDistribution(self, w=520, x1=(- 100000.0), x2=100000.0, dx=1, surface='R'):
'\n \n '
self.getReflectSpectrum(w, w, 10.0, surface)
xMin = max(0.0, x1)
xMax = 0.0
for layer in self.struct:
xMax += layer.thickness
xMax = min(xMax, x2)
field = []
x = xMin
while (x <= xMax):
field.append([x, self.getFieldIntensity(w, x)])
x += dx
return field | def getFieldDistribution(self, w=520, x1=(- 100000.0), x2=100000.0, dx=1, surface='R'):
'\n \n '
self.getReflectSpectrum(w, w, 10.0, surface)
xMin = max(0.0, x1)
xMax = 0.0
for layer in self.struct:
xMax += layer.thickness
xMax = min(xMax, x2)
field = []
x = xMin
while (x <= xMax):
field.append([x, self.getFieldIntensity(w, x)])
x += dx
return field<|docstring|>Build the field and return the distribution [[x, intensity], ...].<|endoftext|> |
9e8a8021e023e1e9d873e08f2f67c640b80f4152f18b26830db5de25ba6dbfa2 | def getRefractiveIndexDistribution(self):
' Return the [[x, n], ] distribution. '
nkList = []
x = 0.0
for layer in self.struct:
if (layer.thickness == 0.0):
continue
dx = min(0.001, (layer.thickness * 0.001))
nkList.append([(x + dx), layer.getNk().real])
dx = max((layer.thickness - 0.001), (layer.thickness * (1.0 - 0.001)))
nkList.append([(x + dx), layer.getNk().real])
x += layer.thickness
return nkList | Return the [[x, n], ] distribution. | MultilayerOptics/Optics.py | getRefractiveIndexDistribution | KanglinXiong/multilayerOptics | 0 | python | def getRefractiveIndexDistribution(self):
' '
nkList = []
x = 0.0
for layer in self.struct:
if (layer.thickness == 0.0):
continue
dx = min(0.001, (layer.thickness * 0.001))
nkList.append([(x + dx), layer.getNk().real])
dx = max((layer.thickness - 0.001), (layer.thickness * (1.0 - 0.001)))
nkList.append([(x + dx), layer.getNk().real])
x += layer.thickness
return nkList | def getRefractiveIndexDistribution(self):
' '
nkList = []
x = 0.0
for layer in self.struct:
if (layer.thickness == 0.0):
continue
dx = min(0.001, (layer.thickness * 0.001))
nkList.append([(x + dx), layer.getNk().real])
dx = max((layer.thickness - 0.001), (layer.thickness * (1.0 - 0.001)))
nkList.append([(x + dx), layer.getNk().real])
x += layer.thickness
return nkList<|docstring|>Return the [[x, n], ] distribution.<|endoftext|> |
4fff461e4ec60293f0ce2399599d355389953f81c7c07fb5da0ef74dced51084 | def searchMode(self, gainMedium='QW', g0=1500, w0=520, g1=50, g2=5000, w1=100, w2=1600):
'\n Find mode and threshold gain by tuning material gain.\n When no incident light is needed, the cavity emits laser.\n refer to Analysis of multielement semiconductor lasers,\n Journal of Applied Physics 54, 2962 (1983)\n '
def getIncidentWave(g, w):
" local function for Newton's method "
self.setMaterialGain(gainMedium, g)
return abs(self.getRightIncidentByLeftExit(w)[0][0])
nt = Solver.Newton(getIncidentWave, g0, w0, g1, g2, w1, w2)
print('begin mode searching...')
print('iter, residual, gain(1/cm), wavelength(nm)')
return nt.run() | Find mode and threshold gain by tuning material gain.
When no incident light is needed, the cavity emits laser.
refer to Analysis of multielement semiconductor lasers,
Journal of Applied Physics 54, 2962 (1983) | MultilayerOptics/Optics.py | searchMode | KanglinXiong/multilayerOptics | 0 | python | def searchMode(self, gainMedium='QW', g0=1500, w0=520, g1=50, g2=5000, w1=100, w2=1600):
'\n Find mode and threshold gain by tuning material gain.\n When no incident light is needed, the cavity emits laser.\n refer to Analysis of multielement semiconductor lasers,\n Journal of Applied Physics 54, 2962 (1983)\n '
def getIncidentWave(g, w):
" local function for Newton's method "
self.setMaterialGain(gainMedium, g)
return abs(self.getRightIncidentByLeftExit(w)[0][0])
nt = Solver.Newton(getIncidentWave, g0, w0, g1, g2, w1, w2)
print('begin mode searching...')
print('iter, residual, gain(1/cm), wavelength(nm)')
return nt.run() | def searchMode(self, gainMedium='QW', g0=1500, w0=520, g1=50, g2=5000, w1=100, w2=1600):
'\n Find mode and threshold gain by tuning material gain.\n When no incident light is needed, the cavity emits laser.\n refer to Analysis of multielement semiconductor lasers,\n Journal of Applied Physics 54, 2962 (1983)\n '
def getIncidentWave(g, w):
" local function for Newton's method "
self.setMaterialGain(gainMedium, g)
return abs(self.getRightIncidentByLeftExit(w)[0][0])
nt = Solver.Newton(getIncidentWave, g0, w0, g1, g2, w1, w2)
print('begin mode searching...')
print('iter, residual, gain(1/cm), wavelength(nm)')
return nt.run()<|docstring|>Find mode and threshold gain by tuning material gain.
When no incident light is needed, the cavity emits laser.
refer to Analysis of multielement semiconductor lasers,
Journal of Applied Physics 54, 2962 (1983)<|endoftext|> |
4b47ca24de0d0c5c0f1a651e9e9757d1bac202920fd1a5c43d950640ef954c7c | def getIncidentWave(g, w):
" local function for Newton's method "
self.setMaterialGain(gainMedium, g)
return abs(self.getRightIncidentByLeftExit(w)[0][0]) | local function for Newton's method | MultilayerOptics/Optics.py | getIncidentWave | KanglinXiong/multilayerOptics | 0 | python | def getIncidentWave(g, w):
" "
self.setMaterialGain(gainMedium, g)
return abs(self.getRightIncidentByLeftExit(w)[0][0]) | def getIncidentWave(g, w):
" "
self.setMaterialGain(gainMedium, g)
return abs(self.getRightIncidentByLeftExit(w)[0][0])<|docstring|>local function for Newton's method<|endoftext|> |
af49ca51ffd16adfe3c7f4ae1249ff1e6087fbb9215722fe563e4557eff81328 | def closeEvent(self, event: QCloseEvent) -> None:
'This event handler is called with the given event when Qt receives\n a window close request for a top-level widget from the window\n system.\n\n By default, the event is accepted and the widget is\n closed. You can reimplement this function to change the way\n the widget responds to window close requests. For example, you\n can prevent the window from closing by calling ignore() on all\n events.\n\n In other words: If you do not want your widget to be hidden,\n or want some special handling, you should reimplement the\n event handler and ignore() the event.\n\n '
LOG.info('QImageDisplay.closeEvent: accepted: %s', event.isAccepted()) | This event handler is called with the given event when Qt receives
a window close request for a top-level widget from the window
system.
By default, the event is accepted and the widget is
closed. You can reimplement this function to change the way
the widget responds to window close requests. For example, you
can prevent the window from closing by calling ignore() on all
events.
In other words: If you do not want your widget to be hidden,
or want some special handling, you should reimplement the
event handler and ignore() the event. | dltb/thirdparty/qt.py | closeEvent | Petr-By/qtpyvis | 3 | python | def closeEvent(self, event: QCloseEvent) -> None:
'This event handler is called with the given event when Qt receives\n a window close request for a top-level widget from the window\n system.\n\n By default, the event is accepted and the widget is\n closed. You can reimplement this function to change the way\n the widget responds to window close requests. For example, you\n can prevent the window from closing by calling ignore() on all\n events.\n\n In other words: If you do not want your widget to be hidden,\n or want some special handling, you should reimplement the\n event handler and ignore() the event.\n\n '
LOG.info('QImageDisplay.closeEvent: accepted: %s', event.isAccepted()) | def closeEvent(self, event: QCloseEvent) -> None:
'This event handler is called with the given event when Qt receives\n a window close request for a top-level widget from the window\n system.\n\n By default, the event is accepted and the widget is\n closed. You can reimplement this function to change the way\n the widget responds to window close requests. For example, you\n can prevent the window from closing by calling ignore() on all\n events.\n\n In other words: If you do not want your widget to be hidden,\n or want some special handling, you should reimplement the\n event handler and ignore() the event.\n\n '
LOG.info('QImageDisplay.closeEvent: accepted: %s', event.isAccepted())<|docstring|>This event handler is called with the given event when Qt receives
a window close request for a top-level widget from the window
system.
By default, the event is accepted and the widget is
closed. You can reimplement this function to change the way
the widget responds to window close requests. For example, you
can prevent the window from closing by calling ignore() on all
events.
In other words: If you do not want your widget to be hidden,
or want some special handling, you should reimplement the
event handler and ignore() the event.<|endoftext|> |
2e9f744b15131e744d8d6ad6951e36c9cb0caa89bcc4fff7fa460dc3fe719a70 | def hideEvent(self, event: QHideEvent) -> None:
'Hide events are sent to widgets immediately after they have been\n hidden.\n '
LOG.info('QImageDisplay.hideEvent: display was hidden')
self._application.quit() | Hide events are sent to widgets immediately after they have been
hidden. | dltb/thirdparty/qt.py | hideEvent | Petr-By/qtpyvis | 3 | python | def hideEvent(self, event: QHideEvent) -> None:
'Hide events are sent to widgets immediately after they have been\n hidden.\n '
LOG.info('QImageDisplay.hideEvent: display was hidden')
self._application.quit() | def hideEvent(self, event: QHideEvent) -> None:
'Hide events are sent to widgets immediately after they have been\n hidden.\n '
LOG.info('QImageDisplay.hideEvent: display was hidden')
self._application.quit()<|docstring|>Hide events are sent to widgets immediately after they have been
hidden.<|endoftext|> |
1a2a37465aa92adee109062f50e8c1fac594eb2900171e0046daaadf0f7a1720 | def keyPressEvent(self, event: QKeyEvent) -> None:
"This event handler, for event event, can be reimplemented in a\n subclass to receive key press events for the widget.\n\n We add handling of 'Esc' and 'Q' to close the window.\n "
key = event.key()
if ((key in (Qt.Key_Q, Qt.Key_Escape)) and (self._application is not None)):
self._application.quit()
else:
super().keyPressEvent(event) | This event handler, for event event, can be reimplemented in a
subclass to receive key press events for the widget.
We add handling of 'Esc' and 'Q' to close the window. | dltb/thirdparty/qt.py | keyPressEvent | Petr-By/qtpyvis | 3 | python | def keyPressEvent(self, event: QKeyEvent) -> None:
"This event handler, for event event, can be reimplemented in a\n subclass to receive key press events for the widget.\n\n We add handling of 'Esc' and 'Q' to close the window.\n "
key = event.key()
if ((key in (Qt.Key_Q, Qt.Key_Escape)) and (self._application is not None)):
self._application.quit()
else:
super().keyPressEvent(event) | def keyPressEvent(self, event: QKeyEvent) -> None:
"This event handler, for event event, can be reimplemented in a\n subclass to receive key press events for the widget.\n\n We add handling of 'Esc' and 'Q' to close the window.\n "
key = event.key()
if ((key in (Qt.Key_Q, Qt.Key_Escape)) and (self._application is not None)):
self._application.quit()
else:
super().keyPressEvent(event)<|docstring|>This event handler, for event event, can be reimplemented in a
subclass to receive key press events for the widget.
We add handling of 'Esc' and 'Q' to close the window.<|endoftext|> |
a73f3c8144a4201c56528f9b98dab7d473b5a8fca09d7f465e1b2f8d03f45877 | @pyqtSlot()
def onAboutToQuit(self) -> None:
'A slot to be connected to the QApplicaton.aboutToQuit signal.\n It will inform this :py:class:`QImageDisplay` that the main\n event loop of the application is about to finish.\n\n This will not automatically close (hide) the\n :py:class:`QImageDisplay`.\n '
LOG.info('QImageDisplay.onAboutToQuit: application.aboutToQuit') | A slot to be connected to the QApplicaton.aboutToQuit signal.
It will inform this :py:class:`QImageDisplay` that the main
event loop of the application is about to finish.
This will not automatically close (hide) the
:py:class:`QImageDisplay`. | dltb/thirdparty/qt.py | onAboutToQuit | Petr-By/qtpyvis | 3 | python | @pyqtSlot()
def onAboutToQuit(self) -> None:
'A slot to be connected to the QApplicaton.aboutToQuit signal.\n It will inform this :py:class:`QImageDisplay` that the main\n event loop of the application is about to finish.\n\n This will not automatically close (hide) the\n :py:class:`QImageDisplay`.\n '
LOG.info('QImageDisplay.onAboutToQuit: application.aboutToQuit') | @pyqtSlot()
def onAboutToQuit(self) -> None:
'A slot to be connected to the QApplicaton.aboutToQuit signal.\n It will inform this :py:class:`QImageDisplay` that the main\n event loop of the application is about to finish.\n\n This will not automatically close (hide) the\n :py:class:`QImageDisplay`.\n '
LOG.info('QImageDisplay.onAboutToQuit: application.aboutToQuit')<|docstring|>A slot to be connected to the QApplicaton.aboutToQuit signal.
It will inform this :py:class:`QImageDisplay` that the main
event loop of the application is about to finish.
This will not automatically close (hide) the
:py:class:`QImageDisplay`.<|endoftext|> |
9495ebd0567903463487d5ce30a39c3aca9746da876e9fb7fc51541b49f709b7 | @pyqtSlot()
def onTimer(self) -> None:
'Slot to connect the `timeout` signal of a :py:class:`QTimer`.\n If a :py:class:`QApplication` is connected with this\n :py:class:`QImageDisplay`, its main event loop will be stopped.\n '
if (self._application is not None):
self._application.quit() | Slot to connect the `timeout` signal of a :py:class:`QTimer`.
If a :py:class:`QApplication` is connected with this
:py:class:`QImageDisplay`, its main event loop will be stopped. | dltb/thirdparty/qt.py | onTimer | Petr-By/qtpyvis | 3 | python | @pyqtSlot()
def onTimer(self) -> None:
'Slot to connect the `timeout` signal of a :py:class:`QTimer`.\n If a :py:class:`QApplication` is connected with this\n :py:class:`QImageDisplay`, its main event loop will be stopped.\n '
if (self._application is not None):
self._application.quit() | @pyqtSlot()
def onTimer(self) -> None:
'Slot to connect the `timeout` signal of a :py:class:`QTimer`.\n If a :py:class:`QApplication` is connected with this\n :py:class:`QImageDisplay`, its main event loop will be stopped.\n '
if (self._application is not None):
self._application.quit()<|docstring|>Slot to connect the `timeout` signal of a :py:class:`QTimer`.
If a :py:class:`QApplication` is connected with this
:py:class:`QImageDisplay`, its main event loop will be stopped.<|endoftext|> |
93dbdc10ac3124b9113edce1d80d69b5570a39113d33e0415c242e08384dc676 | def _show(self, image: np.ndarray, title: str=None, **kwargs) -> None:
'Show the given image.\n\n Arguments\n ---------\n image: Imagelike\n The image to display.\n title: str\n A title to be displayed as image title.\n '
self._view.setImage(image)
title = ('Qt' if (title is None) else f'Qt: {title}')
self._view.setWindowTitle(title) | Show the given image.
Arguments
---------
image: Imagelike
The image to display.
title: str
A title to be displayed as image title. | dltb/thirdparty/qt.py | _show | Petr-By/qtpyvis | 3 | python | def _show(self, image: np.ndarray, title: str=None, **kwargs) -> None:
'Show the given image.\n\n Arguments\n ---------\n image: Imagelike\n The image to display.\n title: str\n A title to be displayed as image title.\n '
self._view.setImage(image)
title = ('Qt' if (title is None) else f'Qt: {title}')
self._view.setWindowTitle(title) | def _show(self, image: np.ndarray, title: str=None, **kwargs) -> None:
'Show the given image.\n\n Arguments\n ---------\n image: Imagelike\n The image to display.\n title: str\n A title to be displayed as image title.\n '
self._view.setImage(image)
title = ('Qt' if (title is None) else f'Qt: {title}')
self._view.setWindowTitle(title)<|docstring|>Show the given image.
Arguments
---------
image: Imagelike
The image to display.
title: str
A title to be displayed as image title.<|endoftext|> |
57d1bbe3c3b070f2aab9e483e4357e588533f722aa7980a8b5ca0579d742ff61 | def _run_blocking_event_loop(self, timeout: float=None) -> None:
'Start the main event loop for this :py:class:`ImageDisplay`.\n '
LOG.info('Running Qt Main Event Loop.')
if self.event_loop_is_running():
raise RuntimeError('Only one background thread is allowed.')
self._event_loop = QThread.currentThread()
if (timeout is not None):
milliseconds = int((timeout * 1000))
timer = QTimer()
timer.setInterval(milliseconds)
timer.setSingleShot(True)
timer.timeout.connect(self._view.onTimer)
timer.start()
LOG.debug('Starting Qt Main Event Loop (exec_)')
self._application.exec_()
LOG.debug('Qt Main Event Loop (exec_) has ended.')
if (timeout is not None):
timer.stop()
timer.timeout.disconnect(self._view.onTimer)
self._event_loop = None
LOG.info('Qt Main Event Loop finished (event loop=%s, closed=%s).', self.event_loop_is_running(), self.closed) | Start the main event loop for this :py:class:`ImageDisplay`. | dltb/thirdparty/qt.py | _run_blocking_event_loop | Petr-By/qtpyvis | 3 | python | def _run_blocking_event_loop(self, timeout: float=None) -> None:
'\n '
LOG.info('Running Qt Main Event Loop.')
if self.event_loop_is_running():
raise RuntimeError('Only one background thread is allowed.')
self._event_loop = QThread.currentThread()
if (timeout is not None):
milliseconds = int((timeout * 1000))
timer = QTimer()
timer.setInterval(milliseconds)
timer.setSingleShot(True)
timer.timeout.connect(self._view.onTimer)
timer.start()
LOG.debug('Starting Qt Main Event Loop (exec_)')
self._application.exec_()
LOG.debug('Qt Main Event Loop (exec_) has ended.')
if (timeout is not None):
timer.stop()
timer.timeout.disconnect(self._view.onTimer)
self._event_loop = None
LOG.info('Qt Main Event Loop finished (event loop=%s, closed=%s).', self.event_loop_is_running(), self.closed) | def _run_blocking_event_loop(self, timeout: float=None) -> None:
'\n '
LOG.info('Running Qt Main Event Loop.')
if self.event_loop_is_running():
raise RuntimeError('Only one background thread is allowed.')
self._event_loop = QThread.currentThread()
if (timeout is not None):
milliseconds = int((timeout * 1000))
timer = QTimer()
timer.setInterval(milliseconds)
timer.setSingleShot(True)
timer.timeout.connect(self._view.onTimer)
timer.start()
LOG.debug('Starting Qt Main Event Loop (exec_)')
self._application.exec_()
LOG.debug('Qt Main Event Loop (exec_) has ended.')
if (timeout is not None):
timer.stop()
timer.timeout.disconnect(self._view.onTimer)
self._event_loop = None
LOG.info('Qt Main Event Loop finished (event loop=%s, closed=%s).', self.event_loop_is_running(), self.closed)<|docstring|>Start the main event loop for this :py:class:`ImageDisplay`.<|endoftext|> |
5722a811fcda026633cc164d1ae8bf8824b212c54587f4d5e095d8dcb994f8c3 | def _process_events(self) -> None:
'Process events for the graphical user interface of\n this :py:class:`ImageDisplay`. Pending events are processed\n in a blocking mode.\n\n Note: Qt requires that event processing is run in the main\n thread.\n '
self._application.processEvents() | Process events for the graphical user interface of
this :py:class:`ImageDisplay`. Pending events are processed
in a blocking mode.
Note: Qt requires that event processing is run in the main
thread. | dltb/thirdparty/qt.py | _process_events | Petr-By/qtpyvis | 3 | python | def _process_events(self) -> None:
'Process events for the graphical user interface of\n this :py:class:`ImageDisplay`. Pending events are processed\n in a blocking mode.\n\n Note: Qt requires that event processing is run in the main\n thread.\n '
self._application.processEvents() | def _process_events(self) -> None:
'Process events for the graphical user interface of\n this :py:class:`ImageDisplay`. Pending events are processed\n in a blocking mode.\n\n Note: Qt requires that event processing is run in the main\n thread.\n '
self._application.processEvents()<|docstring|>Process events for the graphical user interface of
this :py:class:`ImageDisplay`. Pending events are processed
in a blocking mode.
Note: Qt requires that event processing is run in the main
thread.<|endoftext|> |
2f892ffa668af374e51094f8bbd07997f715729233aa776dd20735ed0f4e5822 | @property
def opened(self) -> bool:
'Check if this image :py:class:`Display` is opened, meaning\n the display window is shown and an event loop is running.\n '
return (self._view.isVisible() and self._opened) | Check if this image :py:class:`Display` is opened, meaning
the display window is shown and an event loop is running. | dltb/thirdparty/qt.py | opened | Petr-By/qtpyvis | 3 | python | @property
def opened(self) -> bool:
'Check if this image :py:class:`Display` is opened, meaning\n the display window is shown and an event loop is running.\n '
return (self._view.isVisible() and self._opened) | @property
def opened(self) -> bool:
'Check if this image :py:class:`Display` is opened, meaning\n the display window is shown and an event loop is running.\n '
return (self._view.isVisible() and self._opened)<|docstring|>Check if this image :py:class:`Display` is opened, meaning
the display window is shown and an event loop is running.<|endoftext|> |
b335518442071bbb6a2708f435460c993d28352691601938823a3d0507ce1b46 | @property
def closed(self) -> bool:
'Check if this image :py:class:`Display` is closed, meaning\n that no window is shown (and no event loop is running).\n '
return ((not self._view.isVisible()) or (not self._opened)) | Check if this image :py:class:`Display` is closed, meaning
that no window is shown (and no event loop is running). | dltb/thirdparty/qt.py | closed | Petr-By/qtpyvis | 3 | python | @property
def closed(self) -> bool:
'Check if this image :py:class:`Display` is closed, meaning\n that no window is shown (and no event loop is running).\n '
return ((not self._view.isVisible()) or (not self._opened)) | @property
def closed(self) -> bool:
'Check if this image :py:class:`Display` is closed, meaning\n that no window is shown (and no event loop is running).\n '
return ((not self._view.isVisible()) or (not self._opened))<|docstring|>Check if this image :py:class:`Display` is closed, meaning
that no window is shown (and no event loop is running).<|endoftext|> |
e95d452ada8e8cb46b8841711d5c513c06b4faff185db087b04c256ccd0b4079 | def __prep_stack(self):
'\n Prepare the stack objects\n '
global jobber_stack
if (not self.stack):
if jobber_stack:
self.stack = jobber_stack
else:
self.stack = jobber_stack = self._setup_stack(ryn=self.ryn)
log.debug('RAETReqChannel Using Jobber Stack at = %s\n', self.stack.ha) | Prepare the stack objects | salt/transport/raet.py | __prep_stack | kaelaworthen/salt | 12 | python | def __prep_stack(self):
'\n \n '
global jobber_stack
if (not self.stack):
if jobber_stack:
self.stack = jobber_stack
else:
self.stack = jobber_stack = self._setup_stack(ryn=self.ryn)
log.debug('RAETReqChannel Using Jobber Stack at = %s\n', self.stack.ha) | def __prep_stack(self):
'\n \n '
global jobber_stack
if (not self.stack):
if jobber_stack:
self.stack = jobber_stack
else:
self.stack = jobber_stack = self._setup_stack(ryn=self.ryn)
log.debug('RAETReqChannel Using Jobber Stack at = %s\n', self.stack.ha)<|docstring|>Prepare the stack objects<|endoftext|> |
65c686d91a8953bfc8150ea69adc0173e91195d5b445db622f1de6f8d94d40fa | def _setup_stack(self, ryn='manor'):
'\n Setup and return the LaneStack and Yard used by by channel when global\n not already setup such as in salt-call to communicate to-from the minion\n\n '
role = self.opts.get('id')
if (not role):
emsg = "Missing role('id') required to setup RAETReqChannel."
log.error((emsg + '\n'))
raise ValueError(emsg)
kind = self.opts.get('__role')
if (kind not in kinds.APPL_KINDS):
emsg = "Invalid application kind = '{0}' for RAETReqChannel.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
if (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master], kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]):
lanename = 'master'
elif (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]):
lanename = '{0}_{1}'.format(role, kind)
else:
emsg = "Unsupported application kind '{0}' for RAETReqChannel.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
name = ('channel' + nacling.uuid(size=18))
stack = LaneStack(name=name, lanename=lanename, sockdirpath=self.opts['sock_dir'])
stack.Pk = raeting.PackKind.pack
stack.addRemote(RemoteYard(stack=stack, name=ryn, lanename=lanename, dirpath=self.opts['sock_dir']))
log.debug('Created Channel Jobber Stack %s\n', stack.name)
return stack | Setup and return the LaneStack and Yard used by by channel when global
not already setup such as in salt-call to communicate to-from the minion | salt/transport/raet.py | _setup_stack | kaelaworthen/salt | 12 | python | def _setup_stack(self, ryn='manor'):
'\n Setup and return the LaneStack and Yard used by by channel when global\n not already setup such as in salt-call to communicate to-from the minion\n\n '
role = self.opts.get('id')
if (not role):
emsg = "Missing role('id') required to setup RAETReqChannel."
log.error((emsg + '\n'))
raise ValueError(emsg)
kind = self.opts.get('__role')
if (kind not in kinds.APPL_KINDS):
emsg = "Invalid application kind = '{0}' for RAETReqChannel.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
if (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master], kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]):
lanename = 'master'
elif (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]):
lanename = '{0}_{1}'.format(role, kind)
else:
emsg = "Unsupported application kind '{0}' for RAETReqChannel.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
name = ('channel' + nacling.uuid(size=18))
stack = LaneStack(name=name, lanename=lanename, sockdirpath=self.opts['sock_dir'])
stack.Pk = raeting.PackKind.pack
stack.addRemote(RemoteYard(stack=stack, name=ryn, lanename=lanename, dirpath=self.opts['sock_dir']))
log.debug('Created Channel Jobber Stack %s\n', stack.name)
return stack | def _setup_stack(self, ryn='manor'):
'\n Setup and return the LaneStack and Yard used by by channel when global\n not already setup such as in salt-call to communicate to-from the minion\n\n '
role = self.opts.get('id')
if (not role):
emsg = "Missing role('id') required to setup RAETReqChannel."
log.error((emsg + '\n'))
raise ValueError(emsg)
kind = self.opts.get('__role')
if (kind not in kinds.APPL_KINDS):
emsg = "Invalid application kind = '{0}' for RAETReqChannel.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
if (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master], kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]):
lanename = 'master'
elif (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]):
lanename = '{0}_{1}'.format(role, kind)
else:
emsg = "Unsupported application kind '{0}' for RAETReqChannel.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
name = ('channel' + nacling.uuid(size=18))
stack = LaneStack(name=name, lanename=lanename, sockdirpath=self.opts['sock_dir'])
stack.Pk = raeting.PackKind.pack
stack.addRemote(RemoteYard(stack=stack, name=ryn, lanename=lanename, dirpath=self.opts['sock_dir']))
log.debug('Created Channel Jobber Stack %s\n', stack.name)
return stack<|docstring|>Setup and return the LaneStack and Yard used by by channel when global
not already setup such as in salt-call to communicate to-from the minion<|endoftext|> |
48ce55810e7c8b9c89a6ad23d7a55e1d3752ea9ba600b12bfdafc0d3f890fbb1 | def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
"\n We don't need to do the crypted_transfer_decode_dictentry routine for\n raet, just wrap send.\n "
return self.send(load, tries, timeout) | We don't need to do the crypted_transfer_decode_dictentry routine for
raet, just wrap send. | salt/transport/raet.py | crypted_transfer_decode_dictentry | kaelaworthen/salt | 12 | python | def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
"\n We don't need to do the crypted_transfer_decode_dictentry routine for\n raet, just wrap send.\n "
return self.send(load, tries, timeout) | def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
"\n We don't need to do the crypted_transfer_decode_dictentry routine for\n raet, just wrap send.\n "
return self.send(load, tries, timeout)<|docstring|>We don't need to do the crypted_transfer_decode_dictentry routine for
raet, just wrap send.<|endoftext|> |
e6990932d80aa545f8dfff79dac90361601fb6851c266999abe70085f85b5fa5 | def send(self, load, tries=3, timeout=60, raw=False):
'\n Send a message load and wait for a relative reply\n One shot wonder\n '
self.__prep_stack()
tried = 1
start = time.time()
track = nacling.uuid(18)
src = (None, self.stack.local.name, track)
self.route = {'src': src, 'dst': self.dst}
msg = {'route': self.route, 'load': load}
self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
while (track not in jobber_rxMsgs):
self.stack.serviceAll()
while self.stack.rxMsgs:
(msg, sender) = self.stack.rxMsgs.popleft()
jobber_rxMsgs[msg['route']['dst'][2]] = msg
continue
if (track in jobber_rxMsgs):
break
if ((time.time() - start) > timeout):
if (tried >= tries):
raise ValueError("Message send timed out after '{0} * {1}' secs. route = {2} track = {3} load={4}".format(tries, timeout, self.route, track, load))
self.stack.transmit(msg, self.stack.nameRemotes['manor'].uid)
tried += 1
time.sleep(0.01)
return jobber_rxMsgs.pop(track).get('return', {}) | Send a message load and wait for a relative reply
One shot wonder | salt/transport/raet.py | send | kaelaworthen/salt | 12 | python | def send(self, load, tries=3, timeout=60, raw=False):
'\n Send a message load and wait for a relative reply\n One shot wonder\n '
self.__prep_stack()
tried = 1
start = time.time()
track = nacling.uuid(18)
src = (None, self.stack.local.name, track)
self.route = {'src': src, 'dst': self.dst}
msg = {'route': self.route, 'load': load}
self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
while (track not in jobber_rxMsgs):
self.stack.serviceAll()
while self.stack.rxMsgs:
(msg, sender) = self.stack.rxMsgs.popleft()
jobber_rxMsgs[msg['route']['dst'][2]] = msg
continue
if (track in jobber_rxMsgs):
break
if ((time.time() - start) > timeout):
if (tried >= tries):
raise ValueError("Message send timed out after '{0} * {1}' secs. route = {2} track = {3} load={4}".format(tries, timeout, self.route, track, load))
self.stack.transmit(msg, self.stack.nameRemotes['manor'].uid)
tried += 1
time.sleep(0.01)
return jobber_rxMsgs.pop(track).get('return', {}) | def send(self, load, tries=3, timeout=60, raw=False):
'\n Send a message load and wait for a relative reply\n One shot wonder\n '
self.__prep_stack()
tried = 1
start = time.time()
track = nacling.uuid(18)
src = (None, self.stack.local.name, track)
self.route = {'src': src, 'dst': self.dst}
msg = {'route': self.route, 'load': load}
self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
while (track not in jobber_rxMsgs):
self.stack.serviceAll()
while self.stack.rxMsgs:
(msg, sender) = self.stack.rxMsgs.popleft()
jobber_rxMsgs[msg['route']['dst'][2]] = msg
continue
if (track in jobber_rxMsgs):
break
if ((time.time() - start) > timeout):
if (tried >= tries):
raise ValueError("Message send timed out after '{0} * {1}' secs. route = {2} track = {3} load={4}".format(tries, timeout, self.route, track, load))
self.stack.transmit(msg, self.stack.nameRemotes['manor'].uid)
tried += 1
time.sleep(0.01)
return jobber_rxMsgs.pop(track).get('return', {})<|docstring|>Send a message load and wait for a relative reply
One shot wonder<|endoftext|> |
5fecc717cc1ab1804a224a899bd96fa58ffcb898ce804bc5d1b121af1a4c7f5f | def routes_list(config_list):
'list of lines with static routes from given config-list '
return [line.rstrip() for line in config_list if line.startswith('route ')] | list of lines with static routes from given config-list | fwOper/route.py | routes_list | aliasgar1978/fwOper | 0 | python | def routes_list(config_list):
' '
return [line.rstrip() for line in config_list if line.startswith('route ')] | def routes_list(config_list):
' '
return [line.rstrip() for line in config_list if line.startswith('route ')]<|docstring|>list of lines with static routes from given config-list<|endoftext|> |
b4a30b1da2dd0337ebaef50f3808f493f25d01622eff701e6f63ba3b4c9111da | def prefix(self, network):
'check matching network in ROUTES object, return matching route '
route_match = None
for sn in reversed(self):
if (network in sn):
route_match = sn
break
if route_match:
return route_match | check matching network in ROUTES object, return matching route | fwOper/route.py | prefix | aliasgar1978/fwOper | 0 | python | def prefix(self, network):
' '
route_match = None
for sn in reversed(self):
if (network in sn):
route_match = sn
break
if route_match:
return route_match | def prefix(self, network):
' '
route_match = None
for sn in reversed(self):
if (network in sn):
route_match = sn
break
if route_match:
return route_match<|docstring|>check matching network in ROUTES object, return matching route<|endoftext|> |
6bb4be706d0a8718e5c67d2c15a02f4d1accaf31a54bb45c04d2040ab9a8a42d | def get_route_objects(self):
'set ROUTE objects in self instance '
for route_line in self.cfg_routes_list:
route = ROUTE(route_line)
route.parse()
self.routes_list.append(route) | set ROUTE objects in self instance | fwOper/route.py | get_route_objects | aliasgar1978/fwOper | 0 | python | def get_route_objects(self):
' '
for route_line in self.cfg_routes_list:
route = ROUTE(route_line)
route.parse()
self.routes_list.append(route) | def get_route_objects(self):
' '
for route_line in self.cfg_routes_list:
route = ROUTE(route_line)
route.parse()
self.routes_list.append(route)<|docstring|>set ROUTE objects in self instance<|endoftext|> |
ba0f85e7e0c1b2afe838306f8b73f2249de2248119d4335ff014f5be611c0ae0 | def parse(self):
'parse static route line and set route_dict '
spl_route_line = self.route_line.split()
self._repr_dic['ifdesc'] = spl_route_line[1]
self._repr_dic['nexthop'] = spl_route_line[4]
try:
self._repr_dic['distance'] = int(spl_route_line[5])
except:
self._repr_dic['distance'] = 1
mask = to_dec_mask(spl_route_line[3])
self._repr_dic['network'] = addressing(((spl_route_line[2] + '/') + str(mask))) | parse static route line and set route_dict | fwOper/route.py | parse | aliasgar1978/fwOper | 0 | python | def parse(self):
' '
spl_route_line = self.route_line.split()
self._repr_dic['ifdesc'] = spl_route_line[1]
self._repr_dic['nexthop'] = spl_route_line[4]
try:
self._repr_dic['distance'] = int(spl_route_line[5])
except:
self._repr_dic['distance'] = 1
mask = to_dec_mask(spl_route_line[3])
self._repr_dic['network'] = addressing(((spl_route_line[2] + '/') + str(mask))) | def parse(self):
' '
spl_route_line = self.route_line.split()
self._repr_dic['ifdesc'] = spl_route_line[1]
self._repr_dic['nexthop'] = spl_route_line[4]
try:
self._repr_dic['distance'] = int(spl_route_line[5])
except:
self._repr_dic['distance'] = 1
mask = to_dec_mask(spl_route_line[3])
self._repr_dic['network'] = addressing(((spl_route_line[2] + '/') + str(mask)))<|docstring|>parse static route line and set route_dict<|endoftext|> |
a56169be2808b5341ab45fbc4f71adbdf887d344aae64eceff775828d167b472 | def listdir(fld):
'\n List the files into a folder with the coplete file path instead of the relative file path like os.listdir.\n\n :param fld: string, folder path\n\n '
if (not os.path.isdir(fld)):
raise FileNotFoundError('Could not find directory: {}'.format(fld))
return [os.path.join(fld, f) for f in os.listdir(fld)] | List the files into a folder with the coplete file path instead of the relative file path like os.listdir.
:param fld: string, folder path | morphapi/utils/data_io.py | listdir | lidakanari/morphapi | 7 | python | def listdir(fld):
'\n List the files into a folder with the coplete file path instead of the relative file path like os.listdir.\n\n :param fld: string, folder path\n\n '
if (not os.path.isdir(fld)):
raise FileNotFoundError('Could not find directory: {}'.format(fld))
return [os.path.join(fld, f) for f in os.listdir(fld)] | def listdir(fld):
'\n List the files into a folder with the coplete file path instead of the relative file path like os.listdir.\n\n :param fld: string, folder path\n\n '
if (not os.path.isdir(fld)):
raise FileNotFoundError('Could not find directory: {}'.format(fld))
return [os.path.join(fld, f) for f in os.listdir(fld)]<|docstring|>List the files into a folder with the coplete file path instead of the relative file path like os.listdir.
:param fld: string, folder path<|endoftext|> |
e983819899481901277efb4b55cb2be4ed40e6fcce38db46c106ab7e0603c7ae | def save_yaml(filepath, content, append=False, topcomment=None):
'\n Saves content to a yaml file\n\n :param filepath: path to a file (must include .yaml)\n :param content: dictionary of stuff to save\n\n '
if ((not filepath.endswith('.yaml')) and (not filepath.endswith('.yml'))):
raise ValueError(f'filepath is invalid {filepath}. Should end with yaml or yml')
if (not append):
method = 'w'
else:
method = 'w+'
with open(filepath, method) as yaml_file:
if (topcomment is not None):
yaml_file.write(topcomment)
yaml.dump(content, yaml_file, default_flow_style=False, indent=4) | Saves content to a yaml file
:param filepath: path to a file (must include .yaml)
:param content: dictionary of stuff to save | morphapi/utils/data_io.py | save_yaml | lidakanari/morphapi | 7 | python | def save_yaml(filepath, content, append=False, topcomment=None):
'\n Saves content to a yaml file\n\n :param filepath: path to a file (must include .yaml)\n :param content: dictionary of stuff to save\n\n '
if ((not filepath.endswith('.yaml')) and (not filepath.endswith('.yml'))):
raise ValueError(f'filepath is invalid {filepath}. Should end with yaml or yml')
if (not append):
method = 'w'
else:
method = 'w+'
with open(filepath, method) as yaml_file:
if (topcomment is not None):
yaml_file.write(topcomment)
yaml.dump(content, yaml_file, default_flow_style=False, indent=4) | def save_yaml(filepath, content, append=False, topcomment=None):
'\n Saves content to a yaml file\n\n :param filepath: path to a file (must include .yaml)\n :param content: dictionary of stuff to save\n\n '
if ((not filepath.endswith('.yaml')) and (not filepath.endswith('.yml'))):
raise ValueError(f'filepath is invalid {filepath}. Should end with yaml or yml')
if (not append):
method = 'w'
else:
method = 'w+'
with open(filepath, method) as yaml_file:
if (topcomment is not None):
yaml_file.write(topcomment)
yaml.dump(content, yaml_file, default_flow_style=False, indent=4)<|docstring|>Saves content to a yaml file
:param filepath: path to a file (must include .yaml)
:param content: dictionary of stuff to save<|endoftext|> |
5a777de8eb6da77313cc9b326e796439666af634012e7cb0997a816d0477ddca | def load_yaml(filepath):
'\n Load a YAML file\n\n :param filepath: path to yaml file\n\n '
if ((filepath is None) or (not os.path.isfile(filepath))):
raise ValueError('unrecognized file path: {}'.format(filepath))
if (('yml' not in filepath) and ('yaml' not in filepath)):
raise ValueError('unrecognized file path: {}'.format(filepath))
return yaml.load(open(filepath), Loader=yaml.FullLoader) | Load a YAML file
:param filepath: path to yaml file | morphapi/utils/data_io.py | load_yaml | lidakanari/morphapi | 7 | python | def load_yaml(filepath):
'\n Load a YAML file\n\n :param filepath: path to yaml file\n\n '
if ((filepath is None) or (not os.path.isfile(filepath))):
raise ValueError('unrecognized file path: {}'.format(filepath))
if (('yml' not in filepath) and ('yaml' not in filepath)):
raise ValueError('unrecognized file path: {}'.format(filepath))
return yaml.load(open(filepath), Loader=yaml.FullLoader) | def load_yaml(filepath):
'\n Load a YAML file\n\n :param filepath: path to yaml file\n\n '
if ((filepath is None) or (not os.path.isfile(filepath))):
raise ValueError('unrecognized file path: {}'.format(filepath))
if (('yml' not in filepath) and ('yaml' not in filepath)):
raise ValueError('unrecognized file path: {}'.format(filepath))
return yaml.load(open(filepath), Loader=yaml.FullLoader)<|docstring|>Load a YAML file
:param filepath: path to yaml file<|endoftext|> |
30f5d59127d4b9b5f5947d2d4c6d7078296a71c68a21b47af6efd0e13aa34c59 | def connected_to_internet(url='http://www.google.com/', timeout=5):
"\n Check that there is an internet connection\n\n :param url: url to use for testing (Default value = 'http://www.google.com/')\n :param timeout: timeout to wait for [in seconds] (Default value = 5)\n "
try:
_ = requests.get(url, timeout=timeout)
return True
except requests.ConnectionError:
print('No internet connection available.')
return False | Check that there is an internet connection
:param url: url to use for testing (Default value = 'http://www.google.com/')
:param timeout: timeout to wait for [in seconds] (Default value = 5) | morphapi/utils/data_io.py | connected_to_internet | lidakanari/morphapi | 7 | python | def connected_to_internet(url='http://www.google.com/', timeout=5):
"\n Check that there is an internet connection\n\n :param url: url to use for testing (Default value = 'http://www.google.com/')\n :param timeout: timeout to wait for [in seconds] (Default value = 5)\n "
try:
_ = requests.get(url, timeout=timeout)
return True
except requests.ConnectionError:
print('No internet connection available.')
return False | def connected_to_internet(url='http://www.google.com/', timeout=5):
"\n Check that there is an internet connection\n\n :param url: url to use for testing (Default value = 'http://www.google.com/')\n :param timeout: timeout to wait for [in seconds] (Default value = 5)\n "
try:
_ = requests.get(url, timeout=timeout)
return True
except requests.ConnectionError:
print('No internet connection available.')
return False<|docstring|>Check that there is an internet connection
:param url: url to use for testing (Default value = 'http://www.google.com/')
:param timeout: timeout to wait for [in seconds] (Default value = 5)<|endoftext|> |
9bf7f2527cf713fb134929066073f82c74a14c0f8dd0b67d1fc589de762465b2 | def flatten_list(lst):
'\n Flattens a list of lists\n \n :param lst: list\n\n '
flatten = []
for item in lst:
if isinstance(item, list):
flatten.extend(item)
else:
flatten.append(item)
return flatten | Flattens a list of lists
:param lst: list | morphapi/utils/data_io.py | flatten_list | lidakanari/morphapi | 7 | python | def flatten_list(lst):
'\n Flattens a list of lists\n \n :param lst: list\n\n '
flatten = []
for item in lst:
if isinstance(item, list):
flatten.extend(item)
else:
flatten.append(item)
return flatten | def flatten_list(lst):
'\n Flattens a list of lists\n \n :param lst: list\n\n '
flatten = []
for item in lst:
if isinstance(item, list):
flatten.extend(item)
else:
flatten.append(item)
return flatten<|docstring|>Flattens a list of lists
:param lst: list<|endoftext|> |
4db23d3ddca8df08863c01c0ac96b5e300dfba063e5fea392d38b7ee7d462e68 | def is_any_item_in_list(L1, L2):
'\n Checks if an item in a list is in another list\n\n :param L1: \n :param L2: \n\n '
inboth = [i for i in L1 if (i in L2)]
if inboth:
return True
else:
return False | Checks if an item in a list is in another list
:param L1:
:param L2: | morphapi/utils/data_io.py | is_any_item_in_list | lidakanari/morphapi | 7 | python | def is_any_item_in_list(L1, L2):
'\n Checks if an item in a list is in another list\n\n :param L1: \n :param L2: \n\n '
inboth = [i for i in L1 if (i in L2)]
if inboth:
return True
else:
return False | def is_any_item_in_list(L1, L2):
'\n Checks if an item in a list is in another list\n\n :param L1: \n :param L2: \n\n '
inboth = [i for i in L1 if (i in L2)]
if inboth:
return True
else:
return False<|docstring|>Checks if an item in a list is in another list
:param L1:
:param L2:<|endoftext|> |
24a7255526c735e043d38c80c5a24b5560e8088c50ad7720e0905e68d83a458d | def connect(self, up, down):
'\n connect to keypress signals\n '
if up:
self.deck.key_up.connect(self.cb_key_up, sender=self)
else:
self.deck.key_up.disconnect(self.cb_key_up, sender=self)
if down:
self.deck.key_down.connect(self.cb_key_down, sender=self)
else:
self.deck.key_down.disconnect(self.cb_key_down, sender=self) | connect to keypress signals | streamdeckui/key.py | connect | kneufeld/streamdeckui | 0 | python | def connect(self, up, down):
'\n \n '
if up:
self.deck.key_up.connect(self.cb_key_up, sender=self)
else:
self.deck.key_up.disconnect(self.cb_key_up, sender=self)
if down:
self.deck.key_down.connect(self.cb_key_down, sender=self)
else:
self.deck.key_down.disconnect(self.cb_key_down, sender=self) | def connect(self, up, down):
'\n \n '
if up:
self.deck.key_up.connect(self.cb_key_up, sender=self)
else:
self.deck.key_up.disconnect(self.cb_key_up, sender=self)
if down:
self.deck.key_down.connect(self.cb_key_down, sender=self)
else:
self.deck.key_down.disconnect(self.cb_key_down, sender=self)<|docstring|>connect to keypress signals<|endoftext|> |
73c8464372385815a6310ba95e9d5185970deeb0c649d5e35a7495828e6f8ca1 | def crop_image(self, image):
'\n image has already been processed by resize_image()\n return "our" section of the image\n '
return crop_image(self.device, image, self.deck.key_spacing, self.index) | image has already been processed by resize_image()
return "our" section of the image | streamdeckui/key.py | crop_image | kneufeld/streamdeckui | 0 | python | def crop_image(self, image):
'\n image has already been processed by resize_image()\n return "our" section of the image\n '
return crop_image(self.device, image, self.deck.key_spacing, self.index) | def crop_image(self, image):
'\n image has already been processed by resize_image()\n return "our" section of the image\n '
return crop_image(self.device, image, self.deck.key_spacing, self.index)<|docstring|>image has already been processed by resize_image()
return "our" section of the image<|endoftext|> |
fd785149bce2bab084bfa20055aedb49fbc772a40da408575899d89a89874e2c | def set_image(self, state, image):
'\n store the image but do not show it, use show_image for that\n '
if isinstance(image, pathlib.PurePath):
image = str(image)
image = render_key_image(self.deck, image)
self._images[state] = image | store the image but do not show it, use show_image for that | streamdeckui/key.py | set_image | kneufeld/streamdeckui | 0 | python | def set_image(self, state, image):
'\n \n '
if isinstance(image, pathlib.PurePath):
image = str(image)
image = render_key_image(self.deck, image)
self._images[state] = image | def set_image(self, state, image):
'\n \n '
if isinstance(image, pathlib.PurePath):
image = str(image)
image = render_key_image(self.deck, image)
self._images[state] = image<|docstring|>store the image but do not show it, use show_image for that<|endoftext|> |
77586d378cc5f78127f3e46250ede96186d47309a62abb5f5de3ff4cde167d2a | def __init__(self, mainEngine, dataList=None, *args):
'Constructor'
QtWidgets.QDialog.__init__(self, *args)
self.mainEngine = mainEngine
if (mainEngine is None):
if (dataList is None):
dataList = []
else:
dataList = dataList
else:
dataList = self.loadDataListFromDB()
self.dataHeader = [text.GATEWAY_NAME, text.CONNECT_STATUS, text.USER_ID, text.BROKER_ID, text.GATEWAY_MODULE, text.TD_ADDRESS, text.MD_ADDRESS, text.GATEWAY_DISPLAY_NAME]
self.setGeometry(0, 0, 1120, 600)
self.move((QtWidgets.QDesktopWidget().availableGeometry().center() - self.frameGeometry().center()))
self.setWindowTitle(text.CONNECTION_MANAGER)
self.tableModel = ConnectionTableModel(self, dataList, self.dataHeader)
self.tableView = QtWidgets.QTableView()
self.tableView.setModel(self.tableModel)
self.tableView.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.tableView.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableView.setFocusPolicy(Qt.NoFocus)
self.tableView.clicked.connect(self.showSelection)
self.tableView.clicked.connect(self.selectRow)
self.tableView.doubleClicked.connect(self.rowDoubleClicked)
self.tableView.setSortingEnabled(False)
self.tableView.setAlternatingRowColors(True)
self.tableView.setColumnWidth(0, 140)
self.tableView.setColumnWidth(1, 70)
self.tableView.setColumnWidth(2, 90)
self.tableView.setColumnWidth(3, 70)
self.tableView.setColumnWidth(4, 170)
self.tableView.setColumnWidth(5, 190)
self.tableView.setColumnWidth(6, 190)
self.tableView.setColumnWidth(7, 150)
tableVBoxLayout = QtWidgets.QVBoxLayout()
tableVBoxLayout.addWidget(self.tableView)
self.selectAllButton = QtWidgets.QPushButton(text.SELECT_ALL)
self.selectReverseButton = QtWidgets.QPushButton(text.SELECT_REVERSE)
self.deleteSelectedButton = QtWidgets.QPushButton(text.DELETE_SELECTED)
self.addButton = QtWidgets.QPushButton(text.ADD)
self.connectSelectedButton = QtWidgets.QPushButton(text.CONNECT_SELECTED)
self.connectAllButton = QtWidgets.QPushButton(text.CONNECT_ALL)
self.disconnectSelectedButton = QtWidgets.QPushButton(text.DISCONNECT_SELECTED)
self.disconnectAllButton = QtWidgets.QPushButton(text.DISCONNECT_ALL)
buttonGridLayout = QtWidgets.QGridLayout()
buttonGridLayout.addWidget(self.selectAllButton, 0, 1)
buttonGridLayout.addWidget(self.selectReverseButton, 0, 2)
buttonGridLayout.addWidget(self.deleteSelectedButton, 0, 3)
buttonGridLayout.addWidget(self.addButton, 0, 4)
buttonGridLayout.addWidget(self.connectSelectedButton, 0, 5)
buttonGridLayout.addWidget(self.connectAllButton, 0, 6)
buttonGridLayout.addWidget(self.disconnectSelectedButton, 0, 7)
buttonGridLayout.addWidget(self.disconnectAllButton, 0, 8)
self.selectAllButton.clicked.connect(self.selectAll)
self.selectReverseButton.clicked.connect(self.selectReverse)
self.deleteSelectedButton.clicked.connect(self.deleteSelected)
self.addButton.clicked.connect(self.add)
self.connectSelectedButton.clicked.connect(self.connectSelected)
self.connectAllButton.clicked.connect(self.connectAll)
self.disconnectSelectedButton.clicked.connect(self.disconnectSelected)
self.disconnectAllButton.clicked.connect(self.disconnectAll)
layout = QtWidgets.QGridLayout(self)
layout.addLayout(tableVBoxLayout, 0, 0)
layout.addLayout(buttonGridLayout, 1, 0)
self.setLayout(layout) | Constructor | redtorch/trader/uiConnectionManagerDialog.py | __init__ | sun0x00/redtorch_python | 1 | python | def __init__(self, mainEngine, dataList=None, *args):
QtWidgets.QDialog.__init__(self, *args)
self.mainEngine = mainEngine
if (mainEngine is None):
if (dataList is None):
dataList = []
else:
dataList = dataList
else:
dataList = self.loadDataListFromDB()
self.dataHeader = [text.GATEWAY_NAME, text.CONNECT_STATUS, text.USER_ID, text.BROKER_ID, text.GATEWAY_MODULE, text.TD_ADDRESS, text.MD_ADDRESS, text.GATEWAY_DISPLAY_NAME]
self.setGeometry(0, 0, 1120, 600)
self.move((QtWidgets.QDesktopWidget().availableGeometry().center() - self.frameGeometry().center()))
self.setWindowTitle(text.CONNECTION_MANAGER)
self.tableModel = ConnectionTableModel(self, dataList, self.dataHeader)
self.tableView = QtWidgets.QTableView()
self.tableView.setModel(self.tableModel)
self.tableView.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.tableView.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableView.setFocusPolicy(Qt.NoFocus)
self.tableView.clicked.connect(self.showSelection)
self.tableView.clicked.connect(self.selectRow)
self.tableView.doubleClicked.connect(self.rowDoubleClicked)
self.tableView.setSortingEnabled(False)
self.tableView.setAlternatingRowColors(True)
self.tableView.setColumnWidth(0, 140)
self.tableView.setColumnWidth(1, 70)
self.tableView.setColumnWidth(2, 90)
self.tableView.setColumnWidth(3, 70)
self.tableView.setColumnWidth(4, 170)
self.tableView.setColumnWidth(5, 190)
self.tableView.setColumnWidth(6, 190)
self.tableView.setColumnWidth(7, 150)
tableVBoxLayout = QtWidgets.QVBoxLayout()
tableVBoxLayout.addWidget(self.tableView)
self.selectAllButton = QtWidgets.QPushButton(text.SELECT_ALL)
self.selectReverseButton = QtWidgets.QPushButton(text.SELECT_REVERSE)
self.deleteSelectedButton = QtWidgets.QPushButton(text.DELETE_SELECTED)
self.addButton = QtWidgets.QPushButton(text.ADD)
self.connectSelectedButton = QtWidgets.QPushButton(text.CONNECT_SELECTED)
self.connectAllButton = QtWidgets.QPushButton(text.CONNECT_ALL)
self.disconnectSelectedButton = QtWidgets.QPushButton(text.DISCONNECT_SELECTED)
self.disconnectAllButton = QtWidgets.QPushButton(text.DISCONNECT_ALL)
buttonGridLayout = QtWidgets.QGridLayout()
buttonGridLayout.addWidget(self.selectAllButton, 0, 1)
buttonGridLayout.addWidget(self.selectReverseButton, 0, 2)
buttonGridLayout.addWidget(self.deleteSelectedButton, 0, 3)
buttonGridLayout.addWidget(self.addButton, 0, 4)
buttonGridLayout.addWidget(self.connectSelectedButton, 0, 5)
buttonGridLayout.addWidget(self.connectAllButton, 0, 6)
buttonGridLayout.addWidget(self.disconnectSelectedButton, 0, 7)
buttonGridLayout.addWidget(self.disconnectAllButton, 0, 8)
self.selectAllButton.clicked.connect(self.selectAll)
self.selectReverseButton.clicked.connect(self.selectReverse)
self.deleteSelectedButton.clicked.connect(self.deleteSelected)
self.addButton.clicked.connect(self.add)
self.connectSelectedButton.clicked.connect(self.connectSelected)
self.connectAllButton.clicked.connect(self.connectAll)
self.disconnectSelectedButton.clicked.connect(self.disconnectSelected)
self.disconnectAllButton.clicked.connect(self.disconnectAll)
layout = QtWidgets.QGridLayout(self)
layout.addLayout(tableVBoxLayout, 0, 0)
layout.addLayout(buttonGridLayout, 1, 0)
self.setLayout(layout) | def __init__(self, mainEngine, dataList=None, *args):
QtWidgets.QDialog.__init__(self, *args)
self.mainEngine = mainEngine
if (mainEngine is None):
if (dataList is None):
dataList = []
else:
dataList = dataList
else:
dataList = self.loadDataListFromDB()
self.dataHeader = [text.GATEWAY_NAME, text.CONNECT_STATUS, text.USER_ID, text.BROKER_ID, text.GATEWAY_MODULE, text.TD_ADDRESS, text.MD_ADDRESS, text.GATEWAY_DISPLAY_NAME]
self.setGeometry(0, 0, 1120, 600)
self.move((QtWidgets.QDesktopWidget().availableGeometry().center() - self.frameGeometry().center()))
self.setWindowTitle(text.CONNECTION_MANAGER)
self.tableModel = ConnectionTableModel(self, dataList, self.dataHeader)
self.tableView = QtWidgets.QTableView()
self.tableView.setModel(self.tableModel)
self.tableView.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.tableView.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableView.setFocusPolicy(Qt.NoFocus)
self.tableView.clicked.connect(self.showSelection)
self.tableView.clicked.connect(self.selectRow)
self.tableView.doubleClicked.connect(self.rowDoubleClicked)
self.tableView.setSortingEnabled(False)
self.tableView.setAlternatingRowColors(True)
self.tableView.setColumnWidth(0, 140)
self.tableView.setColumnWidth(1, 70)
self.tableView.setColumnWidth(2, 90)
self.tableView.setColumnWidth(3, 70)
self.tableView.setColumnWidth(4, 170)
self.tableView.setColumnWidth(5, 190)
self.tableView.setColumnWidth(6, 190)
self.tableView.setColumnWidth(7, 150)
tableVBoxLayout = QtWidgets.QVBoxLayout()
tableVBoxLayout.addWidget(self.tableView)
self.selectAllButton = QtWidgets.QPushButton(text.SELECT_ALL)
self.selectReverseButton = QtWidgets.QPushButton(text.SELECT_REVERSE)
self.deleteSelectedButton = QtWidgets.QPushButton(text.DELETE_SELECTED)
self.addButton = QtWidgets.QPushButton(text.ADD)
self.connectSelectedButton = QtWidgets.QPushButton(text.CONNECT_SELECTED)
self.connectAllButton = QtWidgets.QPushButton(text.CONNECT_ALL)
self.disconnectSelectedButton = QtWidgets.QPushButton(text.DISCONNECT_SELECTED)
self.disconnectAllButton = QtWidgets.QPushButton(text.DISCONNECT_ALL)
buttonGridLayout = QtWidgets.QGridLayout()
buttonGridLayout.addWidget(self.selectAllButton, 0, 1)
buttonGridLayout.addWidget(self.selectReverseButton, 0, 2)
buttonGridLayout.addWidget(self.deleteSelectedButton, 0, 3)
buttonGridLayout.addWidget(self.addButton, 0, 4)
buttonGridLayout.addWidget(self.connectSelectedButton, 0, 5)
buttonGridLayout.addWidget(self.connectAllButton, 0, 6)
buttonGridLayout.addWidget(self.disconnectSelectedButton, 0, 7)
buttonGridLayout.addWidget(self.disconnectAllButton, 0, 8)
self.selectAllButton.clicked.connect(self.selectAll)
self.selectReverseButton.clicked.connect(self.selectReverse)
self.deleteSelectedButton.clicked.connect(self.deleteSelected)
self.addButton.clicked.connect(self.add)
self.connectSelectedButton.clicked.connect(self.connectSelected)
self.connectAllButton.clicked.connect(self.connectAll)
self.disconnectSelectedButton.clicked.connect(self.disconnectSelected)
self.disconnectAllButton.clicked.connect(self.disconnectAll)
layout = QtWidgets.QGridLayout(self)
layout.addLayout(tableVBoxLayout, 0, 0)
layout.addLayout(buttonGridLayout, 1, 0)
self.setLayout(layout)<|docstring|>Constructor<|endoftext|> |
9bbc8f1dc09581a20e7d88b484cc1e951003ed0cf9e4606b8931d67ba67b545b | def add(self):
'增加'
dialog = EditDialog()
if dialog.exec_():
data = dialog.getData()
data['_id'] = ((data['brokerID'] + '_') + data['userID'])
gatewayname = str(data['_id'])
accountList = self.mainEngine.dbQuery(constant.RED_TORCH_DB_NAME, 'account_collection', {'_id': data['_id']})
if (len(accountList) > 0):
quitMsg = u'记录已存在,是否覆盖?'
reply = QtWidgets.QMessageBox.question(self, text.WARN, quitMsg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if (reply == QtWidgets.QMessageBox.Yes):
self.mainEngine.dbUpdate(constant.RED_TORCH_DB_NAME, 'account_collection', data, {'_id': data['_id']})
else:
self.mainEngine.dbInsert(constant.RED_TORCH_DB_NAME, 'account_collection', data)
if dialog.getSaveAndConnectFlag():
self.gatewayConnect(gatewayname)
self.setCursor(Qt.WaitCursor)
time.sleep(1.5)
self.setCursor(Qt.ArrowCursor)
dialog.destroy()
dataList = self.loadDataListFromDB()
self.tableModel.updateModel(dataList) | 增加 | redtorch/trader/uiConnectionManagerDialog.py | add | sun0x00/redtorch_python | 1 | python | def add(self):
dialog = EditDialog()
if dialog.exec_():
data = dialog.getData()
data['_id'] = ((data['brokerID'] + '_') + data['userID'])
gatewayname = str(data['_id'])
accountList = self.mainEngine.dbQuery(constant.RED_TORCH_DB_NAME, 'account_collection', {'_id': data['_id']})
if (len(accountList) > 0):
quitMsg = u'记录已存在,是否覆盖?'
reply = QtWidgets.QMessageBox.question(self, text.WARN, quitMsg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if (reply == QtWidgets.QMessageBox.Yes):
self.mainEngine.dbUpdate(constant.RED_TORCH_DB_NAME, 'account_collection', data, {'_id': data['_id']})
else:
self.mainEngine.dbInsert(constant.RED_TORCH_DB_NAME, 'account_collection', data)
if dialog.getSaveAndConnectFlag():
self.gatewayConnect(gatewayname)
self.setCursor(Qt.WaitCursor)
time.sleep(1.5)
self.setCursor(Qt.ArrowCursor)
dialog.destroy()
dataList = self.loadDataListFromDB()
self.tableModel.updateModel(dataList) | def add(self):
dialog = EditDialog()
if dialog.exec_():
data = dialog.getData()
data['_id'] = ((data['brokerID'] + '_') + data['userID'])
gatewayname = str(data['_id'])
accountList = self.mainEngine.dbQuery(constant.RED_TORCH_DB_NAME, 'account_collection', {'_id': data['_id']})
if (len(accountList) > 0):
quitMsg = u'记录已存在,是否覆盖?'
reply = QtWidgets.QMessageBox.question(self, text.WARN, quitMsg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if (reply == QtWidgets.QMessageBox.Yes):
self.mainEngine.dbUpdate(constant.RED_TORCH_DB_NAME, 'account_collection', data, {'_id': data['_id']})
else:
self.mainEngine.dbInsert(constant.RED_TORCH_DB_NAME, 'account_collection', data)
if dialog.getSaveAndConnectFlag():
self.gatewayConnect(gatewayname)
self.setCursor(Qt.WaitCursor)
time.sleep(1.5)
self.setCursor(Qt.ArrowCursor)
dialog.destroy()
dataList = self.loadDataListFromDB()
self.tableModel.updateModel(dataList)<|docstring|>增加<|endoftext|> |
5fcf15f065fcd82d77b4c32a06440ed973a8f6b65d3e939e711bd8abc41f9fcf | def edit(self, index):
'编辑'
gatewayName = self.tableModel.dataList[index.row()][0].text()
accountList = self.mainEngine.dbQuery(constant.RED_TORCH_DB_NAME, 'account_collection', {'_id': gatewayName})
if (len(accountList) == 0):
QtWidgets.QMessageBox.information(self, text.INFO, u'记录不存在', QtWidgets.QMessageBox.Ok)
else:
data = accountList[0]
gatewayName = ((data['brokerID'] + '_') + data['userID'])
if self.checkGatewayConnectState(gatewayName):
reply = QtWidgets.QMessageBox.question(self, text.WARN, u'修改将会断开连接,请确认?', QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if (reply == QtWidgets.QMessageBox.Yes):
gateway = self.mainEngine.getGateway(gatewayName)
gateway.close()
else:
return
self.mainEngine.removeGateway(gatewayName)
dialog = EditDialog(edit=True, data=data, parent=self)
if dialog.exec_():
data = dialog.getData()
data['_id'] = gatewayName
self.mainEngine.dbUpdate(constant.RED_TORCH_DB_NAME, 'account_collection', data, {'_id': data['_id']})
if dialog.getSaveAndConnectFlag():
self.gatewayConnect(gatewayName)
self.setCursor(Qt.WaitCursor)
time.sleep(1.5)
self.setCursor(Qt.ArrowCursor)
dialog.destroy()
dataList = self.loadDataListFromDB()
self.tableModel.updateModel(dataList) | 编辑 | redtorch/trader/uiConnectionManagerDialog.py | edit | sun0x00/redtorch_python | 1 | python | def edit(self, index):
gatewayName = self.tableModel.dataList[index.row()][0].text()
accountList = self.mainEngine.dbQuery(constant.RED_TORCH_DB_NAME, 'account_collection', {'_id': gatewayName})
if (len(accountList) == 0):
QtWidgets.QMessageBox.information(self, text.INFO, u'记录不存在', QtWidgets.QMessageBox.Ok)
else:
data = accountList[0]
gatewayName = ((data['brokerID'] + '_') + data['userID'])
if self.checkGatewayConnectState(gatewayName):
reply = QtWidgets.QMessageBox.question(self, text.WARN, u'修改将会断开连接,请确认?', QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if (reply == QtWidgets.QMessageBox.Yes):
gateway = self.mainEngine.getGateway(gatewayName)
gateway.close()
else:
return
self.mainEngine.removeGateway(gatewayName)
dialog = EditDialog(edit=True, data=data, parent=self)
if dialog.exec_():
data = dialog.getData()
data['_id'] = gatewayName
self.mainEngine.dbUpdate(constant.RED_TORCH_DB_NAME, 'account_collection', data, {'_id': data['_id']})
if dialog.getSaveAndConnectFlag():
self.gatewayConnect(gatewayName)
self.setCursor(Qt.WaitCursor)
time.sleep(1.5)
self.setCursor(Qt.ArrowCursor)
dialog.destroy()
dataList = self.loadDataListFromDB()
self.tableModel.updateModel(dataList) | def edit(self, index):
gatewayName = self.tableModel.dataList[index.row()][0].text()
accountList = self.mainEngine.dbQuery(constant.RED_TORCH_DB_NAME, 'account_collection', {'_id': gatewayName})
if (len(accountList) == 0):
QtWidgets.QMessageBox.information(self, text.INFO, u'记录不存在', QtWidgets.QMessageBox.Ok)
else:
data = accountList[0]
gatewayName = ((data['brokerID'] + '_') + data['userID'])
if self.checkGatewayConnectState(gatewayName):
reply = QtWidgets.QMessageBox.question(self, text.WARN, u'修改将会断开连接,请确认?', QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if (reply == QtWidgets.QMessageBox.Yes):
gateway = self.mainEngine.getGateway(gatewayName)
gateway.close()
else:
return
self.mainEngine.removeGateway(gatewayName)
dialog = EditDialog(edit=True, data=data, parent=self)
if dialog.exec_():
data = dialog.getData()
data['_id'] = gatewayName
self.mainEngine.dbUpdate(constant.RED_TORCH_DB_NAME, 'account_collection', data, {'_id': data['_id']})
if dialog.getSaveAndConnectFlag():
self.gatewayConnect(gatewayName)
self.setCursor(Qt.WaitCursor)
time.sleep(1.5)
self.setCursor(Qt.ArrowCursor)
dialog.destroy()
dataList = self.loadDataListFromDB()
self.tableModel.updateModel(dataList)<|docstring|>编辑<|endoftext|> |
07bc0d692b403d49047511750066ec3a2b22ef6c10de66af4a5df8923a61b027 | def sort(self, col, order):
'根据给定的列进行排序'
if (col != 0):
self.layoutAboutToBeChanged.emit()
self.dataList = sorted(self.dataList, key=operator.itemgetter(col))
if (order == Qt.DescendingOrder):
self.dataList.reverse()
self.layoutChanged.emit() | 根据给定的列进行排序 | redtorch/trader/uiConnectionManagerDialog.py | sort | sun0x00/redtorch_python | 1 | python | def sort(self, col, order):
if (col != 0):
self.layoutAboutToBeChanged.emit()
self.dataList = sorted(self.dataList, key=operator.itemgetter(col))
if (order == Qt.DescendingOrder):
self.dataList.reverse()
self.layoutChanged.emit() | def sort(self, col, order):
if (col != 0):
self.layoutAboutToBeChanged.emit()
self.dataList = sorted(self.dataList, key=operator.itemgetter(col))
if (order == Qt.DescendingOrder):
self.dataList.reverse()
self.layoutChanged.emit()<|docstring|>根据给定的列进行排序<|endoftext|> |
74afe75713d9594a5ec37c2ee7474ebb1663b51f755c04b0a5df0a89eefbd880 | def validate(self):
'验证输入'
state = self.userIDValidator.validate(self.userIDLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'用户ID输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.passwordValidator.validate(self.passwordLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'密码输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.brokerIDValidator.validate(self.brokerIDLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'经纪商ID输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.gatewayDisplayNameValidator.validate(self.gatewayDisplayNameLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'网关显示名称输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.tdAddressValidator.validate(self.tdAddressLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'交易服务器地址输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.mdAddressValidator.validate(self.mdAddressLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'行情服务器地址输入不正确', QtWidgets.QMessageBox.Ok)
return False
return True | 验证输入 | redtorch/trader/uiConnectionManagerDialog.py | validate | sun0x00/redtorch_python | 1 | python | def validate(self):
state = self.userIDValidator.validate(self.userIDLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'用户ID输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.passwordValidator.validate(self.passwordLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'密码输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.brokerIDValidator.validate(self.brokerIDLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'经纪商ID输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.gatewayDisplayNameValidator.validate(self.gatewayDisplayNameLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'网关显示名称输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.tdAddressValidator.validate(self.tdAddressLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'交易服务器地址输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.mdAddressValidator.validate(self.mdAddressLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'行情服务器地址输入不正确', QtWidgets.QMessageBox.Ok)
return False
return True | def validate(self):
state = self.userIDValidator.validate(self.userIDLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'用户ID输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.passwordValidator.validate(self.passwordLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'密码输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.brokerIDValidator.validate(self.brokerIDLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'经纪商ID输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.gatewayDisplayNameValidator.validate(self.gatewayDisplayNameLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'网关显示名称输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.tdAddressValidator.validate(self.tdAddressLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'交易服务器地址输入不正确', QtWidgets.QMessageBox.Ok)
return False
state = self.mdAddressValidator.validate(self.mdAddressLineEdit.text(), 0)[0]
if (state != QtGui.QValidator.Acceptable):
QtWidgets.QMessageBox.information(self, text.INFO, u'行情服务器地址输入不正确', QtWidgets.QMessageBox.Ok)
return False
return True<|docstring|>验证输入<|endoftext|> |
7c45ab536cf5dd608ce1af33bb2e5fdb8279c0015f36098cf79ae3e002a5d7cb | def save(self):
'保存'
if self.validate():
self.accept() | 保存 | redtorch/trader/uiConnectionManagerDialog.py | save | sun0x00/redtorch_python | 1 | python | def save(self):
if self.validate():
self.accept() | def save(self):
if self.validate():
self.accept()<|docstring|>保存<|endoftext|> |
c7639e51113ba674bf8111776135326910b609f0c4ff1dcf5f03a0a264c5e33e | def saveAndConnect(self):
'保存并连接'
if self.validate():
self.saveAndConnectFlag = True
self.accept() | 保存并连接 | redtorch/trader/uiConnectionManagerDialog.py | saveAndConnect | sun0x00/redtorch_python | 1 | python | def saveAndConnect(self):
if self.validate():
self.saveAndConnectFlag = True
self.accept() | def saveAndConnect(self):
if self.validate():
self.saveAndConnectFlag = True
self.accept()<|docstring|>保存并连接<|endoftext|> |
2bc12fa1398808bf1dee77bca04df9a8d342f6b63c6461b76407ae4f80ae975d | def getSaveAndConnectFlag(self):
'获取保存并连接标记'
return self.saveAndConnectFlag | 获取保存并连接标记 | redtorch/trader/uiConnectionManagerDialog.py | getSaveAndConnectFlag | sun0x00/redtorch_python | 1 | python | def getSaveAndConnectFlag(self):
return self.saveAndConnectFlag | def getSaveAndConnectFlag(self):
return self.saveAndConnectFlag<|docstring|>获取保存并连接标记<|endoftext|> |
5db150842bd006e5bd777fbd6f810cc5ba27250f57aff72d8090dc157a3057ce | def getData(self):
'获取数据'
return {'userID': self.userIDLineEdit.text(), 'password': self.passwordLineEdit.text(), 'authCode': self.authCodeLineEdit.text(), 'userProductInfo': self.userProductInfoLineEdit.text(), 'brokerID': self.brokerIDLineEdit.text(), 'gatewayModule': self.gatewayModuleComboBox.currentText(), 'gatewayDisplayName': self.gatewayDisplayNameLineEdit.text(), 'tdAddress': self.tdAddressLineEdit.text(), 'mdAddress': self.mdAddressLineEdit.text()} | 获取数据 | redtorch/trader/uiConnectionManagerDialog.py | getData | sun0x00/redtorch_python | 1 | python | def getData(self):
return {'userID': self.userIDLineEdit.text(), 'password': self.passwordLineEdit.text(), 'authCode': self.authCodeLineEdit.text(), 'userProductInfo': self.userProductInfoLineEdit.text(), 'brokerID': self.brokerIDLineEdit.text(), 'gatewayModule': self.gatewayModuleComboBox.currentText(), 'gatewayDisplayName': self.gatewayDisplayNameLineEdit.text(), 'tdAddress': self.tdAddressLineEdit.text(), 'mdAddress': self.mdAddressLineEdit.text()} | def getData(self):
return {'userID': self.userIDLineEdit.text(), 'password': self.passwordLineEdit.text(), 'authCode': self.authCodeLineEdit.text(), 'userProductInfo': self.userProductInfoLineEdit.text(), 'brokerID': self.brokerIDLineEdit.text(), 'gatewayModule': self.gatewayModuleComboBox.currentText(), 'gatewayDisplayName': self.gatewayDisplayNameLineEdit.text(), 'tdAddress': self.tdAddressLineEdit.text(), 'mdAddress': self.mdAddressLineEdit.text()}<|docstring|>获取数据<|endoftext|> |
d57d3e3ea471c7dd3c5d1ab0432a67d6837d0302097eb077da8f675edf926c78 | def apply_lut(im, lut):
'Apply LUT to an image.\n\n Arguments:\n im: An image.\n lut: A LUT (LookUp Table). The size must be 256.\n\n Returns:\n The output image.\n\n Raises:\n ValueError: if `lut` has invalid size.\n '
if (len(lut) != 256):
raise ValueError('A size of LUT must be 256: {}'.format(len(lut)))
return im.point((lut * len(im.getbands()))) | Apply LUT to an image.
Arguments:
im: An image.
lut: A LUT (LookUp Table). The size must be 256.
Returns:
The output image.
Raises:
ValueError: if `lut` has invalid size. | pilgram/util/apply_lut.py | apply_lut | akiomik/pilgram | 46 | python | def apply_lut(im, lut):
'Apply LUT to an image.\n\n Arguments:\n im: An image.\n lut: A LUT (LookUp Table). The size must be 256.\n\n Returns:\n The output image.\n\n Raises:\n ValueError: if `lut` has invalid size.\n '
if (len(lut) != 256):
raise ValueError('A size of LUT must be 256: {}'.format(len(lut)))
return im.point((lut * len(im.getbands()))) | def apply_lut(im, lut):
'Apply LUT to an image.\n\n Arguments:\n im: An image.\n lut: A LUT (LookUp Table). The size must be 256.\n\n Returns:\n The output image.\n\n Raises:\n ValueError: if `lut` has invalid size.\n '
if (len(lut) != 256):
raise ValueError('A size of LUT must be 256: {}'.format(len(lut)))
return im.point((lut * len(im.getbands())))<|docstring|>Apply LUT to an image.
Arguments:
im: An image.
lut: A LUT (LookUp Table). The size must be 256.
Returns:
The output image.
Raises:
ValueError: if `lut` has invalid size.<|endoftext|> |
d90b3e296812a4940916e6a50458837fb21986c09f5d7ec55bc6ba0c2541eeef | @classmethod
def create_database_and_manager(cls, hostname, database, username, password, name=None, fail=True):
'\n Create database and manager at the same time.\n '
db = Database.pymysql(hostname=hostname, database=database, username=username, password=password)
obj = cls(db, fail=fail)
if (name is not None):
obj.seller.seller_name = name
obj.buyer.buyer_name = name
return obj | Create database and manager at the same time. | pydarkstar/auction/manager.py | create_database_and_manager | Korrbit/pydarkstar | 0 | python | @classmethod
def create_database_and_manager(cls, hostname, database, username, password, name=None, fail=True):
'\n \n '
db = Database.pymysql(hostname=hostname, database=database, username=username, password=password)
obj = cls(db, fail=fail)
if (name is not None):
obj.seller.seller_name = name
obj.buyer.buyer_name = name
return obj | @classmethod
def create_database_and_manager(cls, hostname, database, username, password, name=None, fail=True):
'\n \n '
db = Database.pymysql(hostname=hostname, database=database, username=username, password=password)
obj = cls(db, fail=fail)
if (name is not None):
obj.seller.seller_name = name
obj.buyer.buyer_name = name
return obj<|docstring|>Create database and manager at the same time.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.