code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
"""Test data"""
import os
import pytest
TEST_PATH = os.path.abspath(os.path.dirname(__file__))
TEST_DATA_PATH = os.path.join(TEST_PATH, 'test_data')
def _get_test_data(filename):
file_path = os.path.join(TEST_DATA_PATH, filename)
with open(file_path, encoding='utf-8') as data_file:
data = data_file.read()
return data
@pytest.fixture(name='map_empty_conf')
def map_empty_conf():
return _get_test_data('map-empty.conf')
@pytest.fixture(name='map_conf')
def map_conf():
return _get_test_data('map.conf')
@pytest.fixture(name='location_api')
def location_api():
return _get_test_data('location-api.conf')
@pytest.fixture(name='location_policy')
def location_policy():
return _get_test_data('location-policy.conf')
@pytest.fixture(name='upstream_conf')
def upstream_conf():
return _get_test_data('upstream.conf')
@pytest.fixture(name='server_conf')
def server_conf():
return _get_test_data('server.conf')
@pytest.fixture(name='server_no_tls_conf')
def server_no_tls_conf():
return _get_test_data('server-no-tls.conf')
@pytest.fixture(name='server_location_conf')
def server_location_conf():
return _get_test_data('server-location.conf')
@pytest.fixture(name='server_location_no_tls_conf')
def server_location_no_tls_conf():
return _get_test_data('server-location-no-tls.conf')
@pytest.fixture(name='nginx_conf')
def nginx_conf():
return _get_test_data('nginx.conf')
| [
"pytest.fixture",
"os.path.dirname",
"os.path.join"
] | [((137, 173), 'os.path.join', 'os.path.join', (['TEST_PATH', '"""test_data"""'], {}), "(TEST_PATH, 'test_data')\n", (149, 173), False, 'import os\n'), ((369, 406), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""map_empty_conf"""'}), "(name='map_empty_conf')\n", (383, 406), False, 'import pytest\n'), ((476, 507), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""map_conf"""'}), "(name='map_conf')\n", (490, 507), False, 'import pytest\n'), ((565, 600), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""location_api"""'}), "(name='location_api')\n", (579, 600), False, 'import pytest\n'), ((671, 709), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""location_policy"""'}), "(name='location_policy')\n", (685, 709), False, 'import pytest\n'), ((786, 822), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""upstream_conf"""'}), "(name='upstream_conf')\n", (800, 822), False, 'import pytest\n'), ((890, 924), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""server_conf"""'}), "(name='server_conf')\n", (904, 924), False, 'import pytest\n'), ((988, 1029), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""server_no_tls_conf"""'}), "(name='server_no_tls_conf')\n", (1002, 1029), False, 'import pytest\n'), ((1107, 1150), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""server_location_conf"""'}), "(name='server_location_conf')\n", (1121, 1150), False, 'import pytest\n'), ((1232, 1282), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""server_location_no_tls_conf"""'}), "(name='server_location_no_tls_conf')\n", (1246, 1282), False, 'import pytest\n'), ((1378, 1411), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""nginx_conf"""'}), "(name='nginx_conf')\n", (1392, 1411), False, 'import pytest\n'), ((93, 118), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (108, 118), False, 'import os\n'), ((222, 260), 'os.path.join', 'os.path.join', (['TEST_DATA_PATH', 'filename'], {}), '(TEST_DATA_PATH, filename)\n', (234, 
260), False, 'import os\n')] |
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 460862264
"""
"""
random actions, total chaos
"""
board = gamma_new(5, 5, 2, 16)
assert board is not None
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 2, 3) == 1
assert gamma_golden_move(board, 2, 0, 0) == 1
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_move(board, 1, 1, 4) == 1
assert gamma_move(board, 2, 3, 3) == 1
assert gamma_golden_move(board, 2, 0, 1) == 0
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_move(board, 1, 3, 4) == 1
assert gamma_move(board, 2, 4, 3) == 1
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 2, 3, 2) == 1
assert gamma_move(board, 1, 2, 4) == 1
assert gamma_move(board, 2, 3, 1) == 1
assert gamma_move(board, 2, 4, 1) == 1
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_move(board, 1, 0, 1) == 1
board805648356 = gamma_board(board)
assert board805648356 is not None
assert board805648356 == ("2111.\n"
"..222\n"
"11.2.\n"
"1..22\n"
"211..\n")
del board805648356
board805648356 = None
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 2, 2, 2) == 1
board964345287 = gamma_board(board)
assert board964345287 is not None
assert board964345287 == ("2111.\n"
"..222\n"
"1122.\n"
"1..22\n"
"211..\n")
del board964345287
board964345287 = None
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_free_fields(board, 2) == 8
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 2, 4, 4) == 1
assert gamma_move(board, 2, 4, 4) == 0
assert gamma_move(board, 1, 1, 3) == 1
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_free_fields(board, 1) == 4
assert gamma_golden_move(board, 1, 4, 4) == 1
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 1, 2, 4) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_busy_fields(board, 1) == 12
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 4, 0) == 1
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_golden_move(board, 2, 1, 1) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_busy_fields(board, 1) == 12
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 1, 4, 0) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 1, 3, 2) == 0
assert gamma_golden_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_busy_fields(board, 2) == 10
gamma_delete(board)
| [
"part1.gamma_move",
"part1.gamma_board",
"part1.gamma_new",
"part1.gamma_golden_move",
"part1.gamma_delete",
"part1.gamma_busy_fields",
"part1.gamma_golden_possible",
"part1.gamma_free_fields"
] | [((283, 305), 'part1.gamma_new', 'gamma_new', (['(5)', '(5)', '(2)', '(16)'], {}), '(5, 5, 2, 16)\n', (292, 305), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1126, 1144), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (1137, 1144), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1436, 1454), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (1447, 1454), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4063, 4082), 'part1.gamma_delete', 'gamma_delete', (['board'], {}), '(board)\n', (4075, 4082), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((340, 366), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(0)'], {}), '(board, 1, 0, 0)\n', (350, 366), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((380, 406), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(3)'], {}), '(board, 2, 2, 3)\n', (390, 406), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((420, 453), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(2)', '(0)', '(0)'], {}), '(board, 2, 0, 0)\n', (437, 453), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((467, 493), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(0)'], {}), '(board, 1, 1, 
0)\n', (477, 493), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((507, 533), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(4)'], {}), '(board, 1, 1, 4)\n', (517, 533), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((547, 573), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(3)'], {}), '(board, 2, 3, 3)\n', (557, 573), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((587, 620), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(2)', '(0)', '(1)'], {}), '(board, 2, 0, 1)\n', (604, 620), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((634, 660), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(2)'], {}), '(board, 1, 0, 2)\n', (644, 660), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((674, 700), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(0)'], {}), '(board, 1, 2, 0)\n', (684, 700), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((714, 740), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(4)'], {}), '(board, 2, 0, 4)\n', (724, 740), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((754, 780), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(4)'], {}), '(board, 1, 3, 4)\n', (764, 780), False, 'from 
part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((794, 820), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(4)', '(3)'], {}), '(board, 2, 4, 3)\n', (804, 820), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((834, 860), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(2)'], {}), '(board, 1, 1, 2)\n', (844, 860), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((874, 900), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(2)'], {}), '(board, 2, 3, 2)\n', (884, 900), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((914, 940), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(4)'], {}), '(board, 1, 2, 4)\n', (924, 940), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((954, 980), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(1)'], {}), '(board, 2, 3, 1)\n', (964, 980), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((994, 1020), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(4)', '(1)'], {}), '(board, 2, 4, 1)\n', (1004, 1020), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1034, 1060), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(4)'], {}), '(board, 1, 3, 4)\n', (1044, 1060), False, 'from part1 import gamma_board, 
gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1074, 1100), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(1)'], {}), '(board, 1, 0, 1)\n', (1084, 1100), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1304, 1330), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(4)'], {}), '(board, 1, 3, 4)\n', (1314, 1330), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1344, 1370), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(4)', '(3)'], {}), '(board, 1, 4, 3)\n', (1354, 1370), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1384, 1410), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(2)'], {}), '(board, 2, 2, 2)\n', (1394, 1410), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1614, 1640), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(1)'], {}), '(board, 1, 3, 1)\n', (1624, 1640), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1654, 1680), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(0)'], {}), '(board, 1, 2, 0)\n', (1664, 1680), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1694, 1720), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(0)'], {}), '(board, 2, 1, 0)\n', (1704, 1720), False, 'from part1 import gamma_board, gamma_busy_fields, 
gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1734, 1760), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(1)'], {}), '(board, 2, 0, 1)\n', (1744, 1760), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1774, 1801), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(2)'], {}), '(board, 2)\n', (1791, 1801), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1815, 1841), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(2)'], {}), '(board, 1, 1, 2)\n', (1825, 1841), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1855, 1886), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(2)'], {}), '(board, 2)\n', (1876, 1886), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1900, 1926), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(1)'], {}), '(board, 1, 1, 1)\n', (1910, 1926), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1940, 1966), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(0)'], {}), '(board, 1, 3, 0)\n', (1950, 1966), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1980, 2006), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(4)'], {}), '(board, 2, 0, 4)\n', (1990, 2006), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, 
gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2020, 2046), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(0)'], {}), '(board, 1, 3, 0)\n', (2030, 2046), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2060, 2086), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(4)'], {}), '(board, 1, 1, 4)\n', (2070, 2086), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2100, 2126), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(4)', '(4)'], {}), '(board, 2, 4, 4)\n', (2110, 2126), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2140, 2166), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(4)', '(4)'], {}), '(board, 2, 4, 4)\n', (2150, 2166), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2180, 2206), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(3)'], {}), '(board, 1, 1, 3)\n', (2190, 2206), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2220, 2246), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(4)'], {}), '(board, 1, 1, 4)\n', (2230, 2246), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2260, 2287), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(1)'], {}), '(board, 1)\n', (2277, 2287), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, 
gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2301, 2334), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(1)', '(4)', '(4)'], {}), '(board, 1, 4, 4)\n', (2318, 2334), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2348, 2374), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(4)'], {}), '(board, 2, 0, 4)\n', (2358, 2374), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2388, 2414), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(2)'], {}), '(board, 1, 1, 2)\n', (2398, 2414), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2428, 2454), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(2)'], {}), '(board, 1, 2, 2)\n', (2438, 2454), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2468, 2499), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(1)'], {}), '(board, 1)\n', (2489, 2499), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2513, 2539), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(2)'], {}), '(board, 2, 1, 2)\n', (2523, 2539), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2553, 2579), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(4)'], {}), '(board, 1, 0, 4)\n', (2563, 2579), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, 
gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2593, 2619), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(4)', '(1)'], {}), '(board, 1, 4, 1)\n', (2603, 2619), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2633, 2664), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(1)'], {}), '(board, 1)\n', (2654, 2664), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2678, 2704), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(4)'], {}), '(board, 2, 0, 4)\n', (2688, 2704), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2718, 2749), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(2)'], {}), '(board, 2)\n', (2739, 2749), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2763, 2789), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(4)'], {}), '(board, 1, 2, 4)\n', (2773, 2789), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2803, 2829), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(2)'], {}), '(board, 1, 1, 2)\n', (2813, 2829), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2843, 2870), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(1)'], {}), '(board, 1)\n', (2860, 2870), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, 
gamma_golden_possible, gamma_move, gamma_new\n'), ((2885, 2911), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(4)'], {}), '(board, 2, 2, 4)\n', (2895, 2911), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2925, 2951), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(0)'], {}), '(board, 2, 3, 0)\n', (2935, 2951), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2965, 2991), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(2)'], {}), '(board, 1, 1, 2)\n', (2975, 2991), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3005, 3031), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(4)', '(1)'], {}), '(board, 1, 4, 1)\n', (3015, 3031), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3045, 3076), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(1)'], {}), '(board, 1)\n', (3066, 3076), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3090, 3116), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(4)', '(0)'], {}), '(board, 2, 4, 0)\n', (3100, 3116), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3130, 3156), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(4)'], {}), '(board, 2, 0, 4)\n', (3140, 3156), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, 
gamma_golden_possible, gamma_move, gamma_new\n'), ((3170, 3203), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(2)', '(1)', '(1)'], {}), '(board, 2, 1, 1)\n', (3187, 3203), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3217, 3243), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(0)'], {}), '(board, 1, 3, 0)\n', (3227, 3243), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3257, 3283), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(1)'], {}), '(board, 2, 3, 1)\n', (3267, 3283), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3297, 3323), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(2)'], {}), '(board, 1, 1, 2)\n', (3307, 3323), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3337, 3364), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(1)'], {}), '(board, 1)\n', (3354, 3364), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3379, 3405), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(2)'], {}), '(board, 2, 1, 2)\n', (3389, 3405), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3419, 3445), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(0)'], {}), '(board, 2, 3, 0)\n', (3429, 3445), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, 
gamma_golden_possible, gamma_move, gamma_new\n'), ((3459, 3485), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(0)'], {}), '(board, 2, 3, 0)\n', (3469, 3485), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3499, 3525), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(4)'], {}), '(board, 2, 3, 4)\n', (3509, 3525), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3539, 3565), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(0)'], {}), '(board, 1, 1, 0)\n', (3549, 3565), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3579, 3605), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(4)'], {}), '(board, 2, 1, 4)\n', (3589, 3605), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3619, 3645), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(4)'], {}), '(board, 2, 3, 4)\n', (3629, 3645), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3659, 3685), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(2)'], {}), '(board, 1, 1, 2)\n', (3669, 3685), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3699, 3725), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(0)'], {}), '(board, 2, 3, 0)\n', (3709, 3725), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, 
gamma_golden_possible, gamma_move, gamma_new\n'), ((3739, 3765), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(0)'], {}), '(board, 2, 2, 0)\n', (3749, 3765), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3779, 3805), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(4)', '(0)'], {}), '(board, 1, 4, 0)\n', (3789, 3805), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3819, 3845), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(4)'], {}), '(board, 1, 1, 4)\n', (3829, 3845), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3859, 3885), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(0)'], {}), '(board, 2, 3, 0)\n', (3869, 3885), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3899, 3925), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(2)'], {}), '(board, 1, 3, 2)\n', (3909, 3925), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3939, 3972), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(1)', '(2)', '(3)'], {}), '(board, 1, 2, 3)\n', (3956, 3972), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3986, 4012), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(0)'], {}), '(board, 2, 3, 0)\n', (3996, 4012), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, 
gamma_golden_possible, gamma_move, gamma_new\n'), ((4026, 4053), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(2)'], {}), '(board, 2)\n', (4043, 4053), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n')] |
# Generated by Django 3.2.7 on 2021-10-03 15:09
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the Seller model's ``description`` field to ``descr``."""

    dependencies = [
        # Must run after the initial sellers schema has been created.
        ('sellers', '0001_initial'),
    ]

    operations = [
        # Pure column rename; no data is transformed.
        migrations.RenameField(
            model_name='seller',
            old_name='description',
            new_name='descr',
        ),
    ]
| [
"django.db.migrations.RenameField"
] | [((216, 305), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""seller"""', 'old_name': '"""description"""', 'new_name': '"""descr"""'}), "(model_name='seller', old_name='description',\n new_name='descr')\n", (238, 305), False, 'from django.db import migrations\n')] |
import sys
from collections import namedtuple, defaultdict
import concrete
from concrete.util import CommunicationReader
import numpy as np
import json
Mention = namedtuple("Mention", "text start end sentence entityType confidence uuid")
def get_entities(comm, entity_tool):
    """Return the entityList of the EntitySet produced by *entity_tool*.

    Returns:
        list of concrete.Entity objects; empty when no EntitySet in *comm*
        was produced by the requested tool.
    """
    idx = concrete.util.metadata.get_index_of_tool(comm.entitySetList,
                                                 entity_tool)
    if idx != -1:
        return comm.entitySetList[idx].entityList
    print(f"Could not find EntitySet with tool name {entity_tool}")
    return []
def comm_to_dict(comm, entity_tool):
    """Flatten a concrete Communication into a JSON-serializable dict.

    Produces doc-level token indices for mentions and (when *entity_tool*
    is given) coreference clusters built from that tool's EntitySet.
    Returns a (output_dict, comm) tuple.
    """
    output_dict = {}
    output_dict['doc_id'] = comm.id
    # Assume single section in sectionList
    sentences = []
    sentence_tokenization_uuids = {}
    offset_dicts = {
        "idx_to_char_offsets": {},
        "char_offsets_to_idx": {}
    }
    # Read comm into list of tokenized sentences.
    for i, sentence in enumerate(comm.sectionList[0].sentenceList):
        sentence_tokenization_uuids[i] = sentence.tokenization.uuid.uuidString
        sentence_text = []
        idx_to_char_offsets = {}
        char_offsets_to_idx = {}
        for token in sentence.tokenization.tokenList.tokenList:
            token_idx = token.tokenIndex
            # textSpan start/ending are presumably character offsets into the
            # communication text -- TODO confirm against the concrete schema.
            token_start = token.textSpan.start
            token_ending = token.textSpan.ending
            token = token.text
            sentence_text.append(token)
            idx_to_char_offsets[token_idx] = (token_start, token_ending)
            char_offsets_to_idx[(token_start, token_ending)] = token_idx
        offset_dicts["idx_to_char_offsets"][i] = idx_to_char_offsets
        offset_dicts["char_offsets_to_idx"][i] = char_offsets_to_idx
        sentences.append(sentence_text)
    output_dict["sentences"] = sentences
    output_dict["offset_dicts"] = offset_dicts
    # Compute offsets
    # sentence_offsets[i] is the doc-level index of sentence i's first token.
    sentence_offsets = np.cumsum([0] + [len(s) for s in sentences])
    tokenization_to_sent = {uuid:idx
                            for idx, uuid in sentence_tokenization_uuids.items()}
    output_dict['entity_set_list'] = []
    # Read through entity mention set list
    mention_list = []
    mention_uuid_map = {}
    mention_skip_map = {}
    for ms_idx, mention_set in enumerate(comm.entityMentionSetList):
        # print ("{} mention_list: {}".format(ms_idx, len(mention_set.mentionList)))
        for mention in mention_set.mentionList:
            tokens = mention.tokens.tokenIndexList
            tokenizationId = mention.tokens.tokenizationId
            sentId = tokenization_to_sent[mention.tokens.tokenizationId.uuidString]
            sent_toks = [sentences[sentId][idx] for idx in tokens]
            # Mention spans are stored as inclusive sentence-level indices.
            m = Mention(text=mention.text,
                        start=min(tokens),
                        end=max(tokens),
                        sentence=sentId,
                        entityType=mention.entityType,
                        confidence=mention.confidence,
                        uuid=mention.uuid)
            mention_list.append(m)
            mention_uuid_map[mention.uuid.uuidString] = m
    output_dict['mentions'] = [(int(sentence_offsets[m.sentence] + m.start),
                                int(sentence_offsets[m.sentence] + m.end))
                               for m in mention_list]
    # Convert Mention to doc-level (start, end) and update mapping
    mention_map = defaultdict(list)
    for m in mention_list:
        start = int(sentence_offsets[m.sentence] + m.start)
        end = int(sentence_offsets[m.sentence] + m.end)
        mention_map[(start, end)].append(m)
    output_dict["mention_map"] = mention_map
    output_dict["clusters"] = []
    # Get entity set list using entity_tool
    if entity_tool is not None:
        entity_list = get_entities(comm, entity_tool)
        uuid_clusters = []
        print (f"Found entity list with {len(entity_list)} entities")
        for entity in entity_list:
            if entity.mentionIdList:
                uuid_clusters.append(entity.mentionIdList)
        mention_count = 0
        clusters = []
        seen = set()
        for cluster in uuid_clusters:
            # Note: this rebinds entity_list as the per-cluster span list.
            entity_list = []
            for mention_uuid in cluster:
                if mention_uuid.uuidString not in seen:
                    seen.add(mention_uuid.uuidString)
                else:
                    print(f"{mention_uuid} in two different clusters")
                m = mention_uuid_map[mention_uuid.uuidString]
                start = int(sentence_offsets[m.sentence] + m.start)
                end = int(sentence_offsets[m.sentence] + m.end)
                entity_list.append([start, end])
            if entity_list:
                clusters.append(entity_list)
        # Ensure every mention is used in exactly one cluster
        assert(len(mention_uuid_map) == len(seen))
        output_dict["clusters"] = clusters
    return (output_dict, comm)
def make_data_iter(path, entity_tool):
    """Yield (output_dict, comm) pairs for each Communication under *path*."""
    reader = CommunicationReader(path)
    for comm, _filename in reader:
        print(f"Entity_tool: {entity_tool}")
        yield comm_to_dict(comm, entity_tool)
if __name__ == "__main__":
    # Usage: script.py <input_comms> <output_file> [entity_tool]
    input_comms = sys.argv[1]
    output_path = sys.argv[2]
    # Optional third argument selects the EntitySet tool providing gold clusters.
    entity_tool = sys.argv[3] if len(sys.argv) > 3 else None
    examples_iter = make_data_iter(input_comms, entity_tool)
    # FIX: the original rebound `output_file` from path to file handle and never
    # closed it; a context manager guarantees the file is flushed and closed.
    with open(output_path, 'w+') as output_file:
        for example, _ in examples_iter:
            clean_version = {
                "sentences": example["sentences"],
                "doc_key": example["doc_id"],
            }
            if example["clusters"]:
                clean_version["clusters"] = example["clusters"]
            else:
                # No gold clusters: fall back to singletons, one per unique span.
                clean_version["clusters"] = [[span] for span in set(example["mentions"])]
            num_clusters = len(clean_version["clusters"])
            num_mentions = sum(len(cluster) for cluster in clean_version["clusters"])
            num_total_mentions = sum(len(mention) for mention in example["mention_map"].values())
            print(f"Wrote {num_clusters} clusters and {num_mentions} mentions" +
                  f" (out of {num_total_mentions}) to {clean_version['doc_key']}")
            output_file.write(json.dumps(clean_version) + "\n")
| [
"concrete.util.CommunicationReader",
"collections.namedtuple",
"json.dumps",
"concrete.util.metadata.get_index_of_tool",
"collections.defaultdict"
] | [((162, 237), 'collections.namedtuple', 'namedtuple', (['"""Mention"""', '"""text start end sentence entityType confidence uuid"""'], {}), "('Mention', 'text start end sentence entityType confidence uuid')\n", (172, 237), False, 'from collections import namedtuple, defaultdict\n'), ((354, 427), 'concrete.util.metadata.get_index_of_tool', 'concrete.util.metadata.get_index_of_tool', (['comm.entitySetList', 'entity_tool'], {}), '(comm.entitySetList, entity_tool)\n', (394, 427), False, 'import concrete\n'), ((3195, 3212), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3206, 3212), False, 'from collections import namedtuple, defaultdict\n'), ((4613, 4638), 'concrete.util.CommunicationReader', 'CommunicationReader', (['path'], {}), '(path)\n', (4632, 4638), False, 'from concrete.util import CommunicationReader\n'), ((5697, 5722), 'json.dumps', 'json.dumps', (['clean_version'], {}), '(clean_version)\n', (5707, 5722), False, 'import json\n')] |
# coding=utf-8
import re
import argparse
import codecs
from itertools import *
from nltk import ngrams
import string
import yaml
# Parse the path of the Kalaba corpus file from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('corpus_kalaba')
args = parser.parse_args()
corpus_kalaba = args.corpus_kalaba
# length
# position
# context
# letter
# Sample sentence used for manual experiments below.
phrase = "kik jigib kimaninit dSarrivu lud Nulak lagoboddu"
listex = phrase.strip().split(' ')
#print(groups(listex,len))
# A corpus is made of sentences, sentences of n-grams, n-grams of letters.
# Fonction1: création du lexique (Ngrams)
def lexique(phrase):
    """Return the distinct n-grams of *phrase*, grouped by n-gram length.

    Every n-gram order from 1 up to the number of tokens is generated;
    the resulting tuples are then grouped by length (ascending).
    """
    data = []
    tokens = phrase.strip().split()
    for i in range(len(tokens)):
        for gram in ngrams(tokens, i + 1):
            if gram not in data:
                data.append(gram)
    # BUGFIX: the original called the undefined name `groups` (NameError at
    # runtime); grouperElements implements the intended group-by-length step.
    return grouperElements(data, len)
def couperSequence(strin):
    """Split *strin* into words when it contains a space, else into characters."""
    return strin.strip().split() if " " in strin else list(strin)
def obtenirCorpus(liste):
    """Collect the unique sub-units of every element, keeping first-seen order."""
    seen = []
    for element in liste:
        for unit in couperSequence(element):
            if unit not in seen:
                seen.append(unit)
    return seen
def grouperElements(liste, function=len):
    """Sort *liste* by *function* and return the runs of equal key as lists.

    Works for Kalaba words (key: length) as well as graphemes; passing
    function=None groups identical items together.
    """
    ordered = sorted(liste, key=function)
    return [list(run) for _key, run in groupby(ordered, function)]
#def PairerMinimales(liste):
# Read the corpus, derive its word list and alphabet, then print the words
# grouped first by length and, within each length, by identity.
with codecs.open(corpus_kalaba,"r",encoding="utf-8") as kalaba:
    kalaba = [phrase.strip() for phrase in kalaba]
    mots = obtenirCorpus(kalaba)
    alphabet = obtenirCorpus(mots)
    print(mots)
    # Group the words by length, then flatten the per-length identity groups.
    listLongueur = grouperElements(mots)
    liste = []
    for element in listLongueur:
        for x in grouperElements(element,None):
            liste.extend(x)
    print(liste)
    liste = []
| [
"codecs.open",
"nltk.ngrams",
"argparse.ArgumentParser"
] | [((140, 165), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (163, 165), False, 'import argparse\n'), ((1491, 1540), 'codecs.open', 'codecs.open', (['corpus_kalaba', '"""r"""'], {'encoding': '"""utf-8"""'}), "(corpus_kalaba, 'r', encoding='utf-8')\n", (1502, 1540), False, 'import codecs\n'), ((740, 761), 'nltk.ngrams', 'ngrams', (['phrase', '(i + 1)'], {}), '(phrase, i + 1)\n', (746, 761), False, 'from nltk import ngrams\n')] |
import os
from koala.server import koala_host
from koala.server.fastapi import *
from sample.fastapi.http_api import *
import sample.player
# Initialize the koala host with this module's globals and the sample config,
# then enable placement (pd), expose the FastAPI endpoints and block serving.
koala_host.init_server(globals().copy(), f"{os.getcwd()}/sample/app.yaml")
koala_host.use_pd()
koala_host.listen_fastapi()
koala_host.run_server()
| [
"koala.server.koala_host.run_server",
"koala.server.koala_host.use_pd",
"os.getcwd",
"koala.server.koala_host.listen_fastapi"
] | [((225, 244), 'koala.server.koala_host.use_pd', 'koala_host.use_pd', ([], {}), '()\n', (242, 244), False, 'from koala.server import koala_host\n'), ((246, 273), 'koala.server.koala_host.listen_fastapi', 'koala_host.listen_fastapi', ([], {}), '()\n', (271, 273), False, 'from koala.server import koala_host\n'), ((275, 298), 'koala.server.koala_host.run_server', 'koala_host.run_server', ([], {}), '()\n', (296, 298), False, 'from koala.server import koala_host\n'), ((193, 204), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (202, 204), False, 'import os\n')] |
from app import db
from app.models import Log, User
def test_login(client):
    """Posting an SMS adds a log entry and echoes the message in the reply."""
    user = User.query.first()
    user.my_logs.append(Log(title="Golf"))
    db.session.add(user)
    db.session.commit()
    payload = {"From": "+4915123595397", "Body": "Einfach den Ball mal besser treffen!"}
    resp = client.post("/sms", data=payload, follow_redirects=True)
    assert resp.status_code == 200
    expected = b"Hallo <<EMAIL>>! Ich logge: Einfach den Ball mal besser treffen!"
    assert expected in resp.data
| [
"app.models.Log",
"app.db.session.commit",
"app.models.User.query.first",
"app.db.session.add"
] | [((89, 107), 'app.models.User.query.first', 'User.query.first', ([], {}), '()\n', (105, 107), False, 'from app.models import Log, User\n'), ((155, 175), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (169, 175), False, 'from app import db\n'), ((180, 199), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (197, 199), False, 'from app import db\n'), ((132, 149), 'app.models.Log', 'Log', ([], {'title': '"""Golf"""'}), "(title='Golf')\n", (135, 149), False, 'from app.models import Log, User\n')] |
"""Support for tracking Tesla cars."""
import logging
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from . import DOMAIN as TESLA_DOMAIN, TeslaDevice
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Tesla device trackers by config_entry."""
    # One tracker entity per device listed under "devices_tracker".
    entities = [
        TeslaDeviceEntity(
            device,
            hass.data[TESLA_DOMAIN][config_entry.entry_id]["controller"],
            config_entry,
        )
        for device in hass.data[TESLA_DOMAIN][config_entry.entry_id]["devices"][
            "devices_tracker"
        ]
    ]
    async_add_entities(entities, True)
class TeslaDeviceEntity(TeslaDevice, TrackerEntity):
    """A class representing a Tesla device."""

    def __init__(self, tesla_device, controller, config_entry):
        """Initialize the Tesla device scanner."""
        super().__init__(tesla_device, controller, config_entry)
        self._listener = None
        self._latitude = None
        self._longitude = None
        self._attributes = {"trackr_id": self.unique_id}

    async def async_update(self):
        """Update the device info."""
        _LOGGER.debug("Updating device position: %s", self.name)
        await super().async_update()
        location = self.tesla_device.get_location()
        if not location:
            return
        self._latitude = location["latitude"]
        self._longitude = location["longitude"]
        self._attributes = {
            "trackr_id": self.unique_id,
            "heading": location["heading"],
            "speed": location["speed"],
        }

    @property
    def source_type(self):
        """Return the source type, eg gps or router, of the device."""
        return SOURCE_TYPE_GPS

    @property
    def should_poll(self):
        """Return whether polling is needed."""
        return True

    @property
    def latitude(self) -> float:
        """Return latitude value of the device."""
        return self._latitude

    @property
    def longitude(self) -> float:
        """Return longitude value of the device."""
        return self._longitude
| [
"logging.getLogger"
] | [((264, 291), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (281, 291), False, 'import logging\n')] |
import unittest
from hangpy.repositories import ServerRepository
class TestServerRepository(unittest.TestCase):
    """Exercise the abstract ServerRepository hooks through a fake subclass."""

    def test_instantiate(self):
        repo = FakeServerRepository()
        self.assertIsNone(repo.get_servers())
        self.assertIsNone(repo.add_server(None))
        self.assertIsNone(repo.update_server(None))
self.assertIsNone(server_repository.update_server(None))
class FakeServerRepository(ServerRepository):
    """Concrete pass-through so the base-class methods can be invoked."""

    def get_servers(self):
        return super().get_servers()

    def add_server(self, server):
        return super().add_server(server)

    def update_server(self, server):
        return super().update_server(server)
return ServerRepository.update_server(self, server)
if __name__ == "__main__":  # allow running this test module directly
    unittest.main()
| [
"unittest.main",
"hangpy.repositories.ServerRepository.add_server",
"hangpy.repositories.ServerRepository.get_servers",
"hangpy.repositories.ServerRepository.update_server"
] | [((735, 750), 'unittest.main', 'unittest.main', ([], {}), '()\n', (748, 750), False, 'import unittest\n'), ((475, 509), 'hangpy.repositories.ServerRepository.get_servers', 'ServerRepository.get_servers', (['self'], {}), '(self)\n', (503, 509), False, 'from hangpy.repositories import ServerRepository\n'), ((560, 601), 'hangpy.repositories.ServerRepository.add_server', 'ServerRepository.add_server', (['self', 'server'], {}), '(self, server)\n', (587, 601), False, 'from hangpy.repositories import ServerRepository\n'), ((655, 699), 'hangpy.repositories.ServerRepository.update_server', 'ServerRepository.update_server', (['self', 'server'], {}), '(self, server)\n', (685, 699), False, 'from hangpy.repositories import ServerRepository\n')] |
#!/usr/bin/env python3
VERSION = "0.0.1-sig"
import requests, json, time, traceback
from random import random
from bs4 import BeautifulSoup
WEBHOOK_URL = "https://hooks.slack.com/services/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"
TEST_URL = "https://apps.apple.com/cn/app/goodnotes-4/id778658393"
SLEEP_IN = 3
URL_LIST = [
"https://apps.apple.com/cn/app/goodnotes-5/id1444383602",
]
def get_price(url: str):
    """Fetch an App Store page and return (app title, price string, url)."""
    # Browser-like headers so the App Store serves the regular HTML page.
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "DNT": "1",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36",
    }
    resp = requests.get(url, headers=headers)
    page = BeautifulSoup(resp.text, 'lxml')
    header = page.find("header", class_="product-header")
    title = header.find("h1", class_="product-header__title").text.strip().split("\n")[0]
    price = header.find("li", class_="app-header__list__item--price").text
    return title, price, url
def post_message(text) -> None:
    """Send *text* to the configured Slack webhook; raise if Slack rejects it."""
    body = json.dumps({"text": text})
    response = requests.post(WEBHOOK_URL, data=body)
    if response.text != 'ok':
        raise RuntimeError("信息发送失败")
def get_prices(urls: list, sleep_func) -> list:
    """Query every url via get_price, sleeping sleep_func() seconds between calls."""
    results = []
    for url in urls:
        results.append(get_price(url))
        time.sleep(sleep_func())
    return results
def handle_check(urls:list, check_sleep:int, item_sleep):
    """Poll *urls* forever; notify Slack whenever an app's price record changes.

    check_sleep: seconds to wait between polling rounds.
    item_sleep:  zero-argument callable giving the per-item sleep duration.
    """
    print("启动价格查询序列...")
    mem_list = []
    while True:
        try:
            new_prices = []
            new_datas = get_prices(urls, item_sleep)
            ## iterate over every freshly fetched record
            for new_data in new_datas:
                ## the record changed or was never seen before
                if not new_data in mem_list:
                    title = new_data[0]
                    old_item = None
                    # locate the previously stored record for this app title
                    for mem_item in mem_list:
                        if title == mem_item[0]:
                            old_item = mem_item
                            break
                    ## drop the stale record
                    if old_item != None:
                        mem_list.remove(old_item)
                    ## refresh the cache and queue a notification
                    mem_list.append(new_data)
                    new_prices.append(new_data)
            if len(new_prices) != 0:
                print("发现存在数据更新,启动消息发送序列...")
                for item in new_prices:
                    message = "[APP] %s 价格发生变动,当前价格: %s <%s|查看>"%item
                    print("发现更新:%s"%message)
                    post_message(message)
        except Exception as e:
            print("发生错误:")
            print(traceback.format_exc())
        finally:
            time.sleep(check_sleep)
    # NOTE(review): unreachable -- the while True loop above never exits.
    print("价格查询序列结束...")
def simple_time():
    """Return a random whole-second sleep duration in [0, SLEEP_IN)."""
    return int(SLEEP_IN * random())
if __name__ == "__main__":
    import argparse
    # CLI: optionally override the webhook URL and the two sleep intervals.
    p = argparse.ArgumentParser(prog='AppStore 价格监测程序',
            description="监测 App Store 价格变化,当其发生改变,则推送通知到 Slack")
    p.add_argument("-s","--slack", dest="slack", help="Slack WebHook URL", type=str)
    p.add_argument("-t1","--time1", dest="time1", help="轮询时长,单位为 s", default=1000, type=int)
    p.add_argument("-t2","--time2", dest="time2", help="单个查询间隔时长,单位为 s", type=int)
    args = p.parse_args()
    # Override module-level defaults only when the flags were provided.
    if args.slack != None:
        WEBHOOK_URL = args.slack
    if args.time2 != None:
        SLEEP_IN = args.time2
    print("Checking with args", args)
    handle_check(URL_LIST, args.time1, simple_time)
"traceback.format_exc",
"argparse.ArgumentParser",
"json.dumps",
"time.sleep",
"requests.get",
"bs4.BeautifulSoup",
"random.random"
] | [((889, 923), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (901, 923), False, 'import requests, json, time, traceback\n'), ((934, 963), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (947, 963), False, 'from bs4 import BeautifulSoup\n'), ((2591, 2696), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""AppStore 价格监测程序"""', 'description': '"""监测 App Store 价格变化,当其发生改变,则推送通知到 Slack"""'}), "(prog='AppStore 价格监测程序', description=\n '监测 App Store 价格变化,当其发生改变,则推送通知到 Slack')\n", (2614, 2696), False, 'import argparse\n'), ((1519, 1536), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (1529, 1536), False, 'import requests, json, time, traceback\n'), ((1299, 1318), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1309, 1318), False, 'import requests, json, time, traceback\n'), ((2442, 2465), 'time.sleep', 'time.sleep', (['check_sleep'], {}), '(check_sleep)\n', (2452, 2465), False, 'import requests, json, time, traceback\n'), ((2520, 2528), 'random.random', 'random', ([], {}), '()\n', (2526, 2528), False, 'from random import random\n'), ((2404, 2426), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2424, 2426), False, 'import requests, json, time, traceback\n')] |
#!/usr/bin/env python
# pylint: disable=invalid-name,ungrouped-imports
import logging
import math
import os
from importlib import import_module
import coloredlogs
import numpy as np
import tensorflow as tf
from scipy.misc import imread
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import (array_ops, control_flow_ops, functional_ops,
math_ops)
def get_hand_segmentation_for_image(image_file, hand_dir):
  """Map an image file path to its hand-segmentation counterpart in *hand_dir*."""
  base = os.path.basename(image_file).replace("image", "hand")
  return "{}/{}".format(hand_dir, base)
def get_patho_segmentation_for_image(image_file, patho_dir):
  """Map an image file path to its pathology-segmentation counterpart in *patho_dir*."""
  base = os.path.basename(image_file).replace("image", "patho")
  return "{}/{}".format(patho_dir, base)
def get_combined_segmentation_for_image(image_file, combined_dir):
  """Map an image file path to its combined-segmentation counterpart in *combined_dir*."""
  base = os.path.basename(image_file).replace("image", "combined")
  return "{}/{}".format(combined_dir, base)
# Names of the per-kind data sub-directories under data/<data_dir>/.
image_subdir = "image"
hand_subdir = "hand"
patho_subdir = "patho"
combined_subdir = "combined"
# Maps an image kind to its sub-directory name (identity mapping).
data_subdirs = {
    image_subdir: image_subdir,
    hand_subdir: hand_subdir,
    patho_subdir: patho_subdir,
    combined_subdir: combined_subdir
}
# Maps an image kind to a (image_file, directory) -> file path function that
# yields the matching segmentation file for a given source image.
image_transformation_functions = {
    image_subdir: lambda x, y: x,
    hand_subdir: get_hand_segmentation_for_image,
    combined_subdir: get_combined_segmentation_for_image,
    patho_subdir: get_patho_segmentation_for_image
}
def is_valid_file(file_name, pattern):
  """Return True when *file_name* is a png/jpg and matches the optional *pattern*.

  A falsy *pattern* matches every file; otherwise the pattern must occur as a
  substring of the file name.
  """
  if pattern and pattern not in file_name:
    return False
  # endswith accepts a tuple of suffixes -- cleaner than a chained `or`.
  return file_name.endswith((".png", ".jpg"))
def prepare_images(images, is_colored):
  """Stack *images* into a float32 array normalized to [-1, 1].

  Grayscale batches get a trailing singleton channel dimension so the output
  is always rank 4 (assuming uniform H x W inputs -- TODO confirm callers).
  """
  tf.logging.info("Preparing {} images".format(len(images)))
  # normalize the images to the range of [-1, 1]
  normalized_images = np.array(images, dtype=np.float32) / 127.5 - 1
  return normalized_images if is_colored else \
      normalized_images.reshape(*normalized_images.shape, 1) # add dimension for "color depth"
def segmentation_score(output, ground_truth):
  """F1 (Dice) score between sign-thresholded logits and ground truth.

  Both tensors are binarized at 0 (>= 0 counts as positive).
  NOTE(review): when there are no predicted or no actual positives this
  divides by zero and yields NaN -- confirm callers tolerate that.
  """
  assert output.shape[0] == ground_truth.shape[0]
  predicted = tf.cast(output >= 0, tf.uint8)
  actual = tf.cast(ground_truth >= 0, tf.uint8)
  tp = tf.count_nonzero(predicted * actual)
  # tn = tf.count_nonzero((predicted - 1) * (actual - 1))
  fp = tf.count_nonzero(predicted * (actual - 1))
  fn = tf.count_nonzero((predicted - 1) * actual)
  precision = tp / (tp + fp)
  recall = tp / (tp + fn)
  return 2 * precision * recall / (precision + recall)
def logistic(logit):
  """Map logits to (0, 1); dispatches to numpy for arrays, tensorflow otherwise."""
  if isinstance(logit, np.ndarray):
    negative_exp = np.exp(-logit)
  else:
    negative_exp = tf.exp(-logit)
  return 1 / (1 + negative_exp)
# since it's unvailable in 1.12.0, this is copied from:
# https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py
def kernel_classifier_distance_and_std_from_activations(real_activations,
                                                        generated_activations,
                                                        max_block_size=1024,
                                                        dtype=None):
  # pylint: disable=no-member
  """Kernel "classifier" distance (KID) for evaluating a generative model.

  Copied from TF 1.13's tf.contrib.gan (classifier_metrics_impl), since it is
  unavailable in 1.12.0. Computes the kernel classifier distance from
  precomputed activations of real and generated images, plus a rough estimate
  of the estimator's standard error. Described in
  https://arxiv.org/abs/1801.01401.

  Given two distributions P and Q of activations, this calculates
      E_{X, X' ~ P}[k(X, X')] + E_{Y, Y' ~ Q}[k(Y, Y')]
        - 2 E_{X ~ P, Y ~ Q}[k(X, Y)]
  where k is the polynomial kernel
      k(x, y) = ( x^T y / dimension + 1 )^3.

  This is an *unbiased*, asymptotically normal estimator; the block estimator
  of https://arxiv.org/abs/1307.1954 is used, taking time quadratic in
  max_block_size. Larger max_block_size decreases the variance of the
  estimate but costs more compute; smaller values give a better estimate of
  the standard error.

  NOTE: the blocking code assumes that real_activations and
  generated_activations are both in random order. If either is sorted in a
  meaningful order, the estimator will behave poorly.

  Args:
    real_activations: 2D Tensor of real-data activations,
      [batch_size, activation_size].
    generated_activations: 2D Tensor of generated-data activations,
      [batch_size, activation_size].
    max_block_size: integer, default 1024. Maximum samples per block.
    dtype: if not None, coerce activations to this dtype before computations.

  Returns:
    The Kernel Inception Distance (scalar of the activations' type) and an
    estimate of the standard error of the distance estimator (same type).
  """
  real_activations.shape.assert_has_rank(2)
  generated_activations.shape.assert_has_rank(2)
  real_activations.shape[1].assert_is_compatible_with(
      generated_activations.shape[1])
  if dtype is None:
    dtype = real_activations.dtype
    assert generated_activations.dtype == dtype
  else:
    real_activations = math_ops.cast(real_activations, dtype)
    generated_activations = math_ops.cast(generated_activations, dtype)
  # Figure out how to split the activations into blocks of approximately
  # equal size, with none larger than max_block_size.
  n_r = array_ops.shape(real_activations)[0]
  n_g = array_ops.shape(generated_activations)[0]
  n_bigger = math_ops.maximum(n_r, n_g)
  n_blocks = math_ops.to_int32(math_ops.ceil(n_bigger / max_block_size))
  v_r = n_r // n_blocks
  v_g = n_g // n_blocks
  n_plusone_r = n_r - v_r * n_blocks
  n_plusone_g = n_g - v_g * n_blocks
  sizes_r = array_ops.concat([
      array_ops.fill([n_blocks - n_plusone_r], v_r),
      array_ops.fill([n_plusone_r], v_r + 1),
  ], 0)
  sizes_g = array_ops.concat([
      array_ops.fill([n_blocks - n_plusone_g], v_g),
      array_ops.fill([n_plusone_g], v_g + 1),
  ], 0)
  zero = array_ops.zeros([1], dtype=dtypes.int32)
  inds_r = array_ops.concat([zero, math_ops.cumsum(sizes_r)], 0)
  inds_g = array_ops.concat([zero, math_ops.cumsum(sizes_g)], 0)
  dim = math_ops.cast(real_activations.shape[1], dtype)
  def compute_kid_block(i):
    'Compute the ith block of the KID estimate.'
    r_s = inds_r[i]
    r_e = inds_r[i + 1]
    r = real_activations[r_s:r_e]
    m = math_ops.cast(r_e - r_s, dtype)
    g_s = inds_g[i]
    g_e = inds_g[i + 1]
    g = generated_activations[g_s:g_e]
    n = math_ops.cast(g_e - g_s, dtype)
    k_rr = (math_ops.matmul(r, r, transpose_b=True) / dim + 1)**3
    k_rg = (math_ops.matmul(r, g, transpose_b=True) / dim + 1)**3
    k_gg = (math_ops.matmul(g, g, transpose_b=True) / dim + 1)**3
    return (-2 * math_ops.reduce_mean(k_rg) +
            (math_ops.reduce_sum(k_rr) - math_ops.trace(k_rr)) / (m * (m - 1)) +
            (math_ops.reduce_sum(k_gg) - math_ops.trace(k_gg)) / (n * (n - 1)))
  ests = functional_ops.map_fn(
      compute_kid_block, math_ops.range(n_blocks), dtype=dtype, back_prop=False)
  mn = math_ops.reduce_mean(ests)
  # nn_impl.moments doesn't use the Bessel correction, which we want here
  n_blocks_ = math_ops.cast(n_blocks, dtype)
  var = control_flow_ops.cond(
      math_ops.less_equal(n_blocks, 1),
      lambda: array_ops.constant(float('nan'), dtype=dtype),
      lambda: math_ops.reduce_sum(math_ops.square(ests - mn)) / (n_blocks_ - 1))
  return mn, math_ops.sqrt(var / n_blocks_)
def load_model(config):
  """Instantiate the model class named config.model_name from the known modules.

  Tries each candidate module in turn; an AttributeError (name not found in
  that module, or raised by the constructor) moves on to the next candidate.
  """
  candidate_modules = (
      "noise_to_image_models",
      "image_to_image_models",
      "deep_image_to_image_models",
      "deep_noise_to_image_models",
      "deep_noise_to_image_models",
      "deep_noise_to_square_image_models",
      "deep_image_super_resolution_models")
  for candidate in candidate_modules:
    try:
      return load_class_from_module(candidate, config.model_name)(config)
    except AttributeError:
      continue
  assert False, "No model with name '{}' found".format(config.model_name)
def load_checkpoint(config, checkpoint_number=None, generator=None, discriminator=None,
    first_generator=None, second_generator=None, first_discriminator=None, second_discriminator=None):
  # pylint: disable=too-many-arguments
  """Restore the supplied (non-None) models from the configured checkpoint dir.

  When checkpoint_number is None, the latest checkpoint is restored.
  """
  tf.logging.info("Loading model from '{}', checkpoint {}".format(config.checkpoint_dir, checkpoint_number))
  candidates = {
      "generator": generator,
      "discriminator": discriminator,
      "first_generator": first_generator,
      "first_discriminator": first_discriminator,
      "second_generator": second_generator,
      "second_discriminator": second_discriminator
  }
  # Only pass the models that were actually provided.
  models = {name: model for name, model in candidates.items() if model}
  if checkpoint_number:
    checkpoint_to_restore = "{}/ckpt-{}".format(config.checkpoint_dir, checkpoint_number)
  else:
    checkpoint_to_restore = tf.train.latest_checkpoint(config.checkpoint_dir)
  tf.train.Checkpoint(**models).restore(checkpoint_to_restore)
def load_image_names(data_dir, pattern=None):
  """Return sorted paths of all valid images under data/<data_dir>/image."""
  image_dir = os.path.join("data", data_dir, image_subdir)
  suffix = " matching pattern '{}'".format(pattern) if pattern else ""
  tf.logging.info("Loading image names from '{}'{}".format(image_dir, suffix))
  matches = [os.path.join(image_dir, name)
             for name in os.listdir(image_dir) if is_valid_file(name, pattern)]
  return sorted(matches)
def augment_images(images, original, flip_lr, flip_ud):
  """Optionally extend *images* with left-right and/or up-down flipped copies.

  With no flips requested the input list is returned unchanged (and *original*
  must be truthy). Otherwise the per-image output order is: original,
  LR-flip, UD-flip, LR+UD-flip -- each only when its flag is enabled.
  """
  assert isinstance(images[0], (np.ndarray, tf.Tensor))
  if not (flip_lr or flip_ud):
    assert original
    return images
  if flip_lr:
    tf.logging.info("Adding L-R-flipped images")
  if flip_ud:
    tf.logging.info("Adding U-D-flipped images")
  result = []
  for img in images:
    if original:
      result.append(img)
    if flip_lr:
      result.append(np.fliplr(img))
    if flip_ud:
      result.append(np.flipud(img))
    if flip_lr and flip_ud:
      result.append(np.flipud(np.fliplr(img)))
  return result
def load_images(image_names, data_dir, image_type, original=True, flip_lr=False, flip_ud=False):
  """Load, optionally flip-augment, and normalize images of the given kind.

  image_type selects both the sub-directory and the filename transformation
  (e.g. "hand" loads the hand-segmentation file matching each image name).
  NOTE(review): scipy.misc.imread was removed in scipy >= 1.2 -- this code
  requires an old scipy; consider imageio.imread when upgrading.
  """
  image_dir = os.path.join("data", data_dir, data_subdirs[image_type])
  tf.logging.info("Loading {} images from '{}'".format(len(image_names), image_dir))
  is_colored = image_type == "image"
  get_file_name = lambda x: image_transformation_functions[image_type](x, image_dir)
  return prepare_images(
      augment_images(
        [imread(get_file_name(file_name), mode="RGB" if is_colored else "L") for file_name in image_names],
        original, flip_lr, flip_ud),
      is_colored)
def configure_logging():
  """Route TF logging through coloredlogs with per-level colors."""
  tf.logging.set_verbosity(tf.logging.INFO)
  coloredlogs.install(level="INFO")
  coloredlogs.DEFAULT_LEVEL_STYLES = {
      "debug": {"color": "white", "bold": False},
      "info": {"color": "white", "bold": True},
      "warning": {"color": "yellow", "bold": True},
      "error": {"color": "red", "bold": True},
      "fatal": {"color": "magenta", "bold": True},
  }
  # Re-format TF's own handlers and stop propagation to avoid duplicate lines.
  logger = logging.getLogger("tensorflow")
  log_format = "%(asctime)s %(levelname)s %(message)s"
  formatter = coloredlogs.ColoredFormatter(log_format)
  for handler in logger.handlers:
    handler.setFormatter(formatter)
  logger.propagate = False
def get_memory_usage_string():
  """Format the memory usage reported by tf.contrib.memory_stats as a string."""
  used = tf.contrib.memory_stats.BytesInUse()
  limit = tf.contrib.memory_stats.BytesLimit()
  peak = tf.contrib.memory_stats.MaxBytesInUse()
  return "{:.1f}/{:.1f}GB ({:.1f}%); peak: {:.1f}GB ({:.1f}%)".format(
      used/1e3**3, limit/1e3**3, 100.0*used/limit, peak/1e3**3, 100.0*peak/limit)
def load_class_from_module(module_name, class_name):
  """Import *module_name* and return its attribute named *class_name*."""
  module = import_module(module_name, class_name)
  return getattr(module, class_name)
def flatten(list_of_lists):
  """Concatenate one level of nesting: [[a, b], [c]] -> [a, b, c]."""
  flat = []
  for sublist in list_of_lists:
    flat.extend(sublist)
  return flat
def format_human(number, digits=3):
  """Render *number* with an SI-style suffix, e.g. 1500 -> '1.5k', 2000000 -> '2M'."""
  base = 1000
  if number < base:
    return str(number)
  magnitude = int(math.log(number) / math.log(base))
  suffix = "kMGTPE"[magnitude - 1]
  scaled = number / math.pow(base, magnitude)
  # Drop a trailing ".0"; otherwise round to the requested precision.
  scaled = int(scaled) if scaled == int(scaled) else round(scaled, digits)
  return "{}{}".format(scaled, suffix)
def slerp(val, low, high):
  # adapted from https://github.com/dribnet/plat/blob/master/plat/interpolate.py
  """Spherical linear interpolation between *low* and *high*; val in [0, 1]."""
  if val <= 0:
    return low
  if val >= 1:
    return high
  if np.allclose(low, high):
    return low
  unit_low = low / np.linalg.norm(low)
  unit_high = high / np.linalg.norm(high)
  omega = np.arccos(np.dot(unit_low, unit_high))
  sin_omega = np.sin(omega)
  return np.sin((1.0 - val) * omega) / sin_omega * low + np.sin(val * omega) / sin_omega * high
def truncate_input(values, threshold):
  """Resample every element with |x| >= threshold from a standard normal.

  Elements are redrawn until they fall inside (-threshold, threshold),
  yielding a truncated-normal-like input tensor.
  """
  tf.logging.debug("Range before truncating: {} - {}".format(tf.reduce_min(values), tf.reduce_max(values)))
  def my_elementwise_func(x):
    # Keep in-range values; redraw out-of-range ones until they fit.
    if abs(x) < threshold:
      return x
    while abs(x) >= threshold:
      x = tf.random_normal((1,))[0]
    return x
  def recursive_map(inputs):
    # Descend until scalars are reached, then apply the elementwise resampler.
    if len(inputs.shape): # pylint: disable=len-as-condition
      return tf.map_fn(recursive_map, inputs)
    return my_elementwise_func(inputs)
  values = recursive_map(values)
  tf.logging.debug("Range after truncating: {} - {}".format(tf.reduce_min(values), tf.reduce_max(values)))
  return values
| [
"logging.getLogger",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.train.Checkpoint",
"tensorflow.logging.set_verbosity",
"math.log",
"numpy.array",
"tensorflow.python.ops.math_ops.cumsum",
"numpy.sin",
"tensorflow.contrib.memory_stats.BytesInUse",
"tensorflow.python.ops.math_ops.range",
... | [((1988, 2018), 'tensorflow.cast', 'tf.cast', (['(output >= 0)', 'tf.uint8'], {}), '(output >= 0, tf.uint8)\n', (1995, 2018), True, 'import tensorflow as tf\n'), ((2030, 2066), 'tensorflow.cast', 'tf.cast', (['(ground_truth >= 0)', 'tf.uint8'], {}), '(ground_truth >= 0, tf.uint8)\n', (2037, 2066), True, 'import tensorflow as tf\n'), ((2075, 2111), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['(predicted * actual)'], {}), '(predicted * actual)\n', (2091, 2111), True, 'import tensorflow as tf\n'), ((2177, 2219), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['(predicted * (actual - 1))'], {}), '(predicted * (actual - 1))\n', (2193, 2219), True, 'import tensorflow as tf\n'), ((2227, 2269), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['((predicted - 1) * actual)'], {}), '((predicted - 1) * actual)\n', (2243, 2269), True, 'import tensorflow as tf\n'), ((6505, 6531), 'tensorflow.python.ops.math_ops.maximum', 'math_ops.maximum', (['n_r', 'n_g'], {}), '(n_r, n_g)\n', (6521, 6531), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7016, 7056), 'tensorflow.python.ops.array_ops.zeros', 'array_ops.zeros', (['[1]'], {'dtype': 'dtypes.int32'}), '([1], dtype=dtypes.int32)\n', (7031, 7056), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7196, 7243), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['real_activations.shape[1]', 'dtype'], {}), '(real_activations.shape[1], dtype)\n', (7209, 7243), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8092, 8118), 'tensorflow.python.ops.math_ops.reduce_mean', 'math_ops.reduce_mean', (['ests'], {}), '(ests)\n', (8112, 8118), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8208, 8238), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['n_blocks', 'dtype'], {}), '(n_blocks, dtype)\n', (8221, 
8238), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((9731, 9760), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), '(**models)\n', (9750, 9760), True, 'import tensorflow as tf\n'), ((10038, 10082), 'os.path.join', 'os.path.join', (['"""data"""', 'data_dir', 'image_subdir'], {}), "('data', data_dir, image_subdir)\n", (10050, 10082), False, 'import os\n'), ((11118, 11174), 'os.path.join', 'os.path.join', (['"""data"""', 'data_dir', 'data_subdirs[image_type]'], {}), "('data', data_dir, data_subdirs[image_type])\n", (11130, 11174), False, 'import os\n'), ((11620, 11661), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (11644, 11661), True, 'import tensorflow as tf\n'), ((11664, 11697), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""INFO"""'}), "(level='INFO')\n", (11683, 11697), False, 'import coloredlogs\n'), ((12004, 12035), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (12021, 12035), False, 'import logging\n'), ((12105, 12145), 'coloredlogs.ColoredFormatter', 'coloredlogs.ColoredFormatter', (['log_format'], {}), '(log_format)\n', (12133, 12145), False, 'import coloredlogs\n'), ((12285, 12321), 'tensorflow.contrib.memory_stats.BytesInUse', 'tf.contrib.memory_stats.BytesInUse', ([], {}), '()\n', (12319, 12321), True, 'import tensorflow as tf\n'), ((12332, 12368), 'tensorflow.contrib.memory_stats.BytesLimit', 'tf.contrib.memory_stats.BytesLimit', ([], {}), '()\n', (12366, 12368), True, 'import tensorflow as tf\n'), ((12378, 12417), 'tensorflow.contrib.memory_stats.MaxBytesInUse', 'tf.contrib.memory_stats.MaxBytesInUse', ([], {}), '()\n', (12415, 12417), True, 'import tensorflow as tf\n'), ((13421, 13443), 'numpy.allclose', 'np.allclose', (['low', 'high'], {}), '(low, high)\n', (13432, 13443), True, 'import numpy as np\n'), ((13547, 13560), 'numpy.sin', 'np.sin', 
(['omega'], {}), '(omega)\n', (13553, 13560), True, 'import numpy as np\n'), ((2411, 2425), 'numpy.exp', 'np.exp', (['(-logit)'], {}), '(-logit)\n', (2417, 2425), True, 'import numpy as np\n'), ((2464, 2478), 'tensorflow.exp', 'tf.exp', (['(-logit)'], {}), '(-logit)\n', (2470, 2478), True, 'import tensorflow as tf\n'), ((6157, 6195), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['real_activations', 'dtype'], {}), '(real_activations, dtype)\n', (6170, 6195), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6224, 6267), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['generated_activations', 'dtype'], {}), '(generated_activations, dtype)\n', (6237, 6267), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6404, 6437), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['real_activations'], {}), '(real_activations)\n', (6419, 6437), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6449, 6487), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['generated_activations'], {}), '(generated_activations)\n', (6464, 6487), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6563, 6603), 'tensorflow.python.ops.math_ops.ceil', 'math_ops.ceil', (['(n_bigger / max_block_size)'], {}), '(n_bigger / max_block_size)\n', (6576, 6603), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7408, 7439), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(r_e - r_s)', 'dtype'], {}), '(r_e - r_s, dtype)\n', (7421, 7439), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7532, 7563), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(g_e - g_s)', 'dtype'], {}), '(g_e - g_s, dtype)\n', (7545, 7563), False, 'from 
tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8028, 8052), 'tensorflow.python.ops.math_ops.range', 'math_ops.range', (['n_blocks'], {}), '(n_blocks)\n', (8042, 8052), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8276, 8308), 'tensorflow.python.ops.math_ops.less_equal', 'math_ops.less_equal', (['n_blocks', '(1)'], {}), '(n_blocks, 1)\n', (8295, 8308), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8466, 8496), 'tensorflow.python.ops.math_ops.sqrt', 'math_ops.sqrt', (['(var / n_blocks_)'], {}), '(var / n_blocks_)\n', (8479, 8496), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((9883, 9932), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['config.checkpoint_dir'], {}), '(config.checkpoint_dir)\n', (9909, 9932), True, 'import tensorflow as tf\n'), ((10579, 10623), 'tensorflow.logging.info', 'tf.logging.info', (['"""Adding L-R-flipped images"""'], {}), "('Adding L-R-flipped images')\n", (10594, 10623), True, 'import tensorflow as tf\n'), ((10642, 10686), 'tensorflow.logging.info', 'tf.logging.info', (['"""Adding U-D-flipped images"""'], {}), "('Adding U-D-flipped images')\n", (10657, 10686), True, 'import tensorflow as tf\n'), ((12642, 12680), 'importlib.import_module', 'import_module', (['module_name', 'class_name'], {}), '(module_name, class_name)\n', (12655, 12680), False, 'from importlib import import_module\n'), ((12992, 13017), 'math.pow', 'math.pow', (['unit', 'magnitude'], {}), '(unit, magnitude)\n', (13000, 13017), False, 'import math\n'), ((1688, 1722), 'numpy.array', 'np.array', (['images'], {'dtype': 'np.float32'}), '(images, dtype=np.float32)\n', (1696, 1722), True, 'import numpy as np\n'), ((6767, 6812), 'tensorflow.python.ops.array_ops.fill', 'array_ops.fill', (['[n_blocks - n_plusone_r]', 'v_r'], {}), '([n_blocks - 
n_plusone_r], v_r)\n', (6781, 6812), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6820, 6858), 'tensorflow.python.ops.array_ops.fill', 'array_ops.fill', (['[n_plusone_r]', '(v_r + 1)'], {}), '([n_plusone_r], v_r + 1)\n', (6834, 6858), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6905, 6950), 'tensorflow.python.ops.array_ops.fill', 'array_ops.fill', (['[n_blocks - n_plusone_g]', 'v_g'], {}), '([n_blocks - n_plusone_g], v_g)\n', (6919, 6950), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6958, 6996), 'tensorflow.python.ops.array_ops.fill', 'array_ops.fill', (['[n_plusone_g]', '(v_g + 1)'], {}), '([n_plusone_g], v_g + 1)\n', (6972, 6996), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7092, 7116), 'tensorflow.python.ops.math_ops.cumsum', 'math_ops.cumsum', (['sizes_r'], {}), '(sizes_r)\n', (7107, 7116), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7157, 7181), 'tensorflow.python.ops.math_ops.cumsum', 'math_ops.cumsum', (['sizes_g'], {}), '(sizes_g)\n', (7172, 7181), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((10237, 10271), 'os.path.join', 'os.path.join', (['image_dir', 'file_name'], {}), '(image_dir, file_name)\n', (10249, 10271), False, 'import os\n'), ((12900, 12916), 'math.log', 'math.log', (['number'], {}), '(number)\n', (12908, 12916), False, 'import math\n'), ((12919, 12933), 'math.log', 'math.log', (['unit'], {}), '(unit)\n', (12927, 12933), False, 'import math\n'), ((13736, 13757), 'tensorflow.reduce_min', 'tf.reduce_min', (['values'], {}), '(values)\n', (13749, 13757), True, 'import tensorflow as tf\n'), ((13759, 13780), 'tensorflow.reduce_max', 'tf.reduce_max', (['values'], {}), '(values)\n', (13772, 13780), True, 
'import tensorflow as tf\n'), ((14038, 14070), 'tensorflow.map_fn', 'tf.map_fn', (['recursive_map', 'inputs'], {}), '(recursive_map, inputs)\n', (14047, 14070), True, 'import tensorflow as tf\n'), ((14203, 14224), 'tensorflow.reduce_min', 'tf.reduce_min', (['values'], {}), '(values)\n', (14216, 14224), True, 'import tensorflow as tf\n'), ((14226, 14247), 'tensorflow.reduce_max', 'tf.reduce_max', (['values'], {}), '(values)\n', (14239, 14247), True, 'import tensorflow as tf\n'), ((506, 534), 'os.path.basename', 'os.path.basename', (['image_file'], {}), '(image_file)\n', (522, 534), False, 'import os\n'), ((658, 686), 'os.path.basename', 'os.path.basename', (['image_file'], {}), '(image_file)\n', (674, 686), False, 'import os\n'), ((820, 848), 'os.path.basename', 'os.path.basename', (['image_file'], {}), '(image_file)\n', (836, 848), False, 'import os\n'), ((10289, 10310), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (10299, 10310), False, 'import os\n'), ((10811, 10827), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (10820, 10827), True, 'import numpy as np\n'), ((10875, 10891), 'numpy.flipud', 'np.flipud', (['image'], {}), '(image)\n', (10884, 10891), True, 'import numpy as np\n'), ((13491, 13510), 'numpy.linalg.norm', 'np.linalg.norm', (['low'], {}), '(low)\n', (13505, 13510), True, 'import numpy as np\n'), ((13517, 13537), 'numpy.linalg.norm', 'np.linalg.norm', (['high'], {}), '(high)\n', (13531, 13537), True, 'import numpy as np\n'), ((13570, 13597), 'numpy.sin', 'np.sin', (['((1.0 - val) * omega)'], {}), '((1.0 - val) * omega)\n', (13576, 13597), True, 'import numpy as np\n'), ((13607, 13626), 'numpy.sin', 'np.sin', (['(val * omega)'], {}), '(val * omega)\n', (13613, 13626), True, 'import numpy as np\n'), ((13896, 13918), 'tensorflow.random_normal', 'tf.random_normal', (['(1,)'], {}), '((1,))\n', (13912, 13918), True, 'import tensorflow as tf\n'), ((7577, 7616), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['r', 
'r'], {'transpose_b': '(True)'}), '(r, r, transpose_b=True)\n', (7592, 7616), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7643, 7682), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['r', 'g'], {'transpose_b': '(True)'}), '(r, g, transpose_b=True)\n', (7658, 7682), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7709, 7748), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['g', 'g'], {'transpose_b': '(True)'}), '(g, g, transpose_b=True)\n', (7724, 7748), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7780, 7806), 'tensorflow.python.ops.math_ops.reduce_mean', 'math_ops.reduce_mean', (['k_rg'], {}), '(k_rg)\n', (7800, 7806), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7903, 7928), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['k_gg'], {}), '(k_gg)\n', (7922, 7928), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7931, 7951), 'tensorflow.python.ops.math_ops.trace', 'math_ops.trace', (['k_gg'], {}), '(k_gg)\n', (7945, 7951), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8405, 8431), 'tensorflow.python.ops.math_ops.square', 'math_ops.square', (['(ests - mn)'], {}), '(ests - mn)\n', (8420, 8431), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((10961, 10977), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (10970, 10977), True, 'import numpy as np\n'), ((7822, 7847), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['k_rr'], {}), '(k_rr)\n', (7841, 7847), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7850, 7870), 
'tensorflow.python.ops.math_ops.trace', 'math_ops.trace', (['k_rr'], {}), '(k_rr)\n', (7864, 7870), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n')] |
##@namespace produtil.mpi_impl.srun
# Adds SLURM srun support to produtil.run
#
# This module is part of the mpi_impl package -- see produtil.mpi_impl
# for details. This translates produtil.run directives to SLURM srun
# commands.
import os, logging, re
import produtil.fileop,produtil.prog,produtil.mpiprog,produtil.pipeline
from .mpi_impl_base import MPIMixed,CMDFGen,ImplementationBase, \
MPIThreadsMixed,MPILocalOptsMixed,MPITooManyRanks
from produtil.pipeline import NoMoreProcesses
from produtil.mpiprog import MIXED_VALUES
class Implementation(ImplementationBase):
    """Adds SLURM srun support to produtil.run

    This module is part of the mpi_impl package -- see produtil.mpi_impl
    for details. This translates produtil.run directives to SLURM srun
    commands."""

    ##@var srun_path
    # Path to the srun program

    @staticmethod
    def name():
        """!Returns the name of this MPI implementation: "srun"."""
        return 'srun'

    @staticmethod
    def detect(srun_path=None,mpiserial_path=None,logger=None,force=False,silent=False,scontrol_path=None,**kwargs):
        """!Detects whether the SLURM srun command is available by
        looking for it in the $PATH. Also requires the SLURM_NODELIST
        variable. This is to detect the case where srun is available,
        but no slurm resources are available.

        @returns an Implementation, or None when no SLURM resources are
          allocated and force is False."""
        if srun_path is None:
            if force:
                srun_path='srun'
            else:
                srun_path=produtil.fileop.find_exe('srun',raise_missing=True)
        if scontrol_path is None:
            if force:
                scontrol_path='scontrol'
            else:
                scontrol_path=produtil.fileop.find_exe('scontrol',raise_missing=True)
        if 'SLURM_NODELIST' not in os.environ and not force:
            return None
        return Implementation(srun_path,scontrol_path,mpiserial_path,logger,silent,force)

    def __init__(self,srun_path,scontrol_path,mpiserial_path,logger,silent,force):
        """!Constructor; do not call directly. Use detect() instead."""
        super(Implementation,self).__init__(logger)
        # NOTE(review): when mpiserial_path is falsy and force is False,
        # self.mpiserial_path is not set here; presumably a default comes
        # from ImplementationBase -- confirm.
        if mpiserial_path or force:
            self.mpiserial_path=mpiserial_path
        self.srun_path=srun_path
        self.scontrol_path=scontrol_path
        self.silent=silent

    def runsync(self,logger=None):
        """!Runs the "sync" command as an exe()."""
        if logger is None: logger=self.logger
        sync=produtil.prog.Runner(['/bin/sync'])
        p=produtil.pipeline.Pipeline(sync,capture=True,logger=logger)
        version=p.to_string()
        status=p.poll()

    def openmp(self,arg,threads):
        """!Adds OpenMP support to the provided object
        @param arg An produtil.prog.Runner or
        produtil.mpiprog.MPIRanksBase object tree
        @param threads the number of threads, or threads per rank, an
        integer"""
        assert(arg is not None)
        if threads is not None:
            arg.threads=threads
            return arg.env(OMP_NUM_THREADS=threads,KMP_NUM_THREADS=threads,
                           KMP_AFFINITY='scatter')
        else:
            del arg.threads
            return arg

    def can_run_mpi(self):
        """!Does this module represent an MPI implementation? Returns True."""
        return True

    def make_bigexe(self,exe,**kwargs):
        """!Returns an ImmutableRunner that will run the specified program.
        @param exe The executable to run on compute nodes.
        @param kwargs Ignored."""
        return produtil.prog.ImmutableRunner([str(exe)],**kwargs)

    def mpirunner(self,arg,allranks=False,**kwargs):
        """!Turns a produtil.mpiprog.MPIRanksBase tree into a produtil.prog.Runner
        @param arg a tree of produtil.mpiprog.MPIRanksBase objects
        @param allranks if True, and only one rank is requested by arg, then
        all MPI ranks will be used
        @param kwargs passed to produtil.mpi_impl.mpi_impl_base.CMDFGen
        when mpiserial is in use.
        @returns a produtil.prog.Runner that will run the selected MPI program"""
        f=self.mpirunner_impl(arg,allranks=allranks,**kwargs)
        if not self.silent:
            logging.getLogger('srun').info("%s => %s"%(repr(arg),repr(f)))
        return f

    def _get_available_nodes(self):
        """!Returns the node names from $SLURM_NODELIST, in order, with
        blank lines and duplicates removed."""
        available_nodes=list()
        nodeset=set()
        scontrol=produtil.prog.Runner([
            self.scontrol_path,'show','hostnames',
            os.environ['SLURM_NODELIST']])
        p=produtil.pipeline.Pipeline(
            scontrol,capture=True,logger=self.logger)
        nodelist=p.to_string()
        status=p.poll()
        for line in nodelist.splitlines():
            node=line.strip()
            # Bug fix: the original used the bare expression "next" here,
            # which is a no-op, so blank and duplicate node names were
            # appended anyway. "continue" actually skips them.
            if not node: continue
            if node in nodeset: continue
            nodeset.add(node)
            available_nodes.append(node)
        return available_nodes

    def mpirunner_impl(self,arg,allranks=False,rewrite_nodefile=True,**kwargs):
        """!This is the underlying implementation of mpirunner and should
        not be called directly."""
        assert(isinstance(arg,produtil.mpiprog.MPIRanksBase))
        (serial,parallel)=arg.check_serial()
        if serial and parallel:
            raise MPIMixed('Cannot mix serial and parallel MPI ranks in the '
                           'same MPI program.')
        if arg.mixedlocalopts():
            raise MPILocalOptsMixed('Cannot mix different local options for different executables or blocks of MPI ranks in impi')
        if arg.threads==MIXED_VALUES:
            raise MPIThreadsMixed('Cannot mix different thread counts for different executables or blocks of MPI ranks in impi')
        srun_args=[self.srun_path,'--export=ALL','--cpu_bind=core']
        if arg.nranks()==1 and allranks:
            # One rank requested, duplicated across all available ranks.
            srun_args.append('--distribution=block:block')
            arglist=[ str(a) for a in arg.to_arglist(
                pre=srun_args,before=[],between=[])]
            return produtil.prog.Runner(arglist)
        elif allranks:
            raise MPIAllRanksError(
                "When using allranks=True, you must provide an mpi program "
                "specification with only one MPI rank (to be duplicated across "
                "all ranks).")
        elif serial:
            # Serial programs run through mpiserial with a command file.
            srun_args.append('--distribution=block:block')
            arg=produtil.mpiprog.collapse(arg)
            lines=[str(a) for a in arg.to_arglist(to_shell=True,expand=True)]
            return produtil.prog.Runner(
                [self.srun_path,'--ntasks','%s'%(arg.nranks()),self.mpiserial_path],
                prerun=CMDFGen('serialcmdf',lines,silent=self.silent,**kwargs))
        else:
            # General MPMD case: build an srun --multi-prog command file,
            # optionally with an explicit node list.
            cmdfile=list()
            irank=0
            if rewrite_nodefile:
                nodefile=list()
                available_nodes=self._get_available_nodes()
                slurm_ppn_string=os.environ['SLURM_JOB_CPUS_PER_NODE'].strip()
                # e.g. "28(x4)" -> "28": keep only the leading cpu count.
                trim_extra=re.sub(r'^(\d+)(?:\(.*\))?',r'\1',slurm_ppn_string)
                node_size=int(trim_extra,10)
                remaining_nodes=list(available_nodes)
            for rank,count in arg.expand_iter(expand=False):
                # Bug fix: the original used the no-op expression "next"
                # instead of "continue", so empty rank blocks produced
                # invalid "%d-%d" ranges in the command file.
                if count<1: continue
                cmdfile.append('%d-%d %s'%(irank,irank+count-1,rank.to_shell()))
                irank+=count
                if rewrite_nodefile:
                    rpn=max(min(node_size,rank.ranks_per_node),1)
                    need_nodes=max(1,(count+rpn-1)//rpn)
                    if need_nodes>len(remaining_nodes):
                        raise MPITooManyRanks('Request is too large for %d nodes of size %d: %s'%(
                            len(available_nodes),node_size,repr(arg)))
                    # Split ranks evenly among nodes:
                    min_rpn=count//need_nodes
                    nodes_with_extra_rank=count-need_nodes*min_rpn
                    for n in range(need_nodes):
                        this_node_rpn=min_rpn
                        if n<nodes_with_extra_rank:
                            this_node_rpn+=1
                        nodefile.extend([remaining_nodes[n]] * this_node_rpn)
                    # Remove the nodes we used:
                    remaining_nodes=remaining_nodes[need_nodes:]
            srun_args.extend(['--ntasks',str(irank)])
            prerun=CMDFGen(
                'srun_cmdfile',cmdfile,filename_arg=True,silent=self.silent,
                filename_option='--multi-prog',**kwargs)
            if rewrite_nodefile:
                srun_args.extend(['--distribution','arbitrary'])
                prerun=CMDFGen(
                    'srun_nodefile',nodefile,filename_arg=True,
                    silent=self.silent,filename_option='--nodelist',
                    next_prerun=prerun,**kwargs)
            return produtil.prog.Runner(srun_args,prerun=prerun)
| [
"logging.getLogger",
"re.sub"
] | [((4136, 4161), 'logging.getLogger', 'logging.getLogger', (['"""srun"""'], {}), "('srun')\n", (4153, 4161), False, 'import os, logging, re\n'), ((6912, 6967), 're.sub', 're.sub', (['"""^(\\\\d+)(?:\\\\(.*\\\\))?"""', '"""\\\\1"""', 'slurm_ppn_string'], {}), "('^(\\\\d+)(?:\\\\(.*\\\\))?', '\\\\1', slurm_ppn_string)\n", (6918, 6967), False, 'import os, logging, re\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PUB_DATAVIZ: Visualization tools for PINNACLE
# Copyright (c) 2020, <NAME>
#
# MIT License:
# https://github.com/IATE-CONICET-UNC/pinnacle/blob/master/LICENSE
from matplotlib import pyplot as plt
from pinnacle.plot_styles import cycling_attrs, aes_attrs
import numpy as np
import random
class pub_dataviz:
    """Visualization helpers for an institution's publication records.

    Wraps a pinnacle institution object and renders summary plots
    (histograms, cumulative counts, scatter plots) as PNG files into
    the directory given by ``config.dir_plot``.
    """

    def __init__(self, inst):
        '''
        Initialize a visualizer for the given institution.

        Parameters
        ----------
        inst : object
            Institution object exposing the publication dataframes
            (pub_inst_all, pub_inst_top, pub_auth_all, pub_auth_top),
            the history table and a config with the plot directory.

        Methods
        ----------------
        - papers_histogram: histogram of the years of publications
        - cumulative_per_author: cumulative number of papers per author
        - authors_citations_years: scatter for number of authors and
          citations.
        - top_proceedings: relation between total number of
          publications and papers.
        - number_authors: distribution of the number of authors with
          time.
        '''
        self.inst = inst
        self.config = inst.config

    # TODO(review): a quality-based filter ("filter_quality") was
    # planned here but never implemented.

    def papers_histogram(self, top=False, per_auth=False, quality=5):
        '''
        Histogram of the publication years.

        Parameters
        ----------
        top: bool
            If True, papers in selected journals are used; otherwise,
            all papers.
        per_auth: bool
            If True, weight each year by the inverse of the number of
            active authors, producing normalized counts.
        quality: int
            Currently unused; presumably intended as a minimum quality
            index for filtering -- confirm before relying on it.
        '''
        if top:
            y = self.inst.pub_inst_top.year.values
        else:
            # TODO: add a function to filter publications by the
            # quality index Q (translated from the original Spanish
            # note).
            y = self.inst.pub_inst_all.year.values
        if per_auth:
            # One weight per year: 1 / (number of active authors),
            # guarded against division by zero with max(1, .).
            y = list(self.inst.history.index)
            Ht = []
            for a in y:
                k = self.inst.history.loc[a][0]
                Ht.append(k)
            w = []
            for i in range(len(Ht)):
                w.append(1/(max(1, Ht[i])))
            sufix = '_norm'
        else:
            y = [int(a) for a in y]
            Ht = np.ones(len(y))
            w = np.ones(len(Ht))
            sufix = ''
        # One bin per calendar year, centered on the integer year.
        tbreaks = np.arange(int(min(y))-0.5, int(max(y)+1)+0.5, 1)
        fig = plt.figure(figsize=(8, 5))
        ax = fig.add_subplot()
        H = ax.hist(y, bins=tbreaks, weights=w)
        ymax = max(H[0])
        ax.set_ylim(0, ymax)
        ax.grid()
        ax.set_xlabel('year')
        if top:
            ax.set_ylabel('number of papers')
            ax.set_title('publications by IATE')
            fout = (f"{self.config.dir_plot}/"
                    f"papers_per_year_top{sufix}.png")
        else:
            ax.set_ylabel('number of published works')
            ax.set_title('papers published by IATE')
            fout = (f"{self.config.dir_plot}/"
                    f"papers_per_year_all{sufix}.png")
        fig.savefig(fout)
        plt.close()

    def papers_histogram2(self, top=False, per_auth=False):
        '''
        Step-plot version of the publication-year histogram, built from
        the institution's history table.

        Parameters
        ----------
        top: bool
            If True, papers in selected journals are used; otherwise,
            all papers.
        per_auth: bool
            If True, plot papers per active author instead of raw
            counts.
        '''
        if per_auth:
            # Papers per active author; history columns are indexed by
            # position: [0] authors, [1] all papers, [2] top papers --
            # TODO confirm against the history-table builder.
            y = list(self.inst.history.index)
            npp = []
            for a in y:
                k = self.inst.history.loc[a]
                if top:
                    npp.append(k[2]/max(1, k[0]))
                else:
                    npp.append(k[1]/max(1, k[0]))
            sufix = '_norm'
            hist = npp
        else:
            y = list(self.inst.history.index)
            y = [int(a) for a in y]
            sufix = ''
            tbreaks = np.arange(int(min(y))-0.5, int(max(y)+1)+0.5, 1)
            H = np.histogram(y, bins=tbreaks)
            hist = H[0]
        fig = plt.figure(figsize=(8, 5))
        ax = fig.add_subplot()
        ax.step(y, hist)
        ymax = max(hist)*1.05
        ax.set_ylim(0, ymax)
        ax.grid()
        ax.set_xlabel('year')
        if top:
            ax.set_ylabel('number of papers')
            ax.set_title('publications by IATE')
            fout = (f"{self.config.dir_plot}/"
                    f"papers_per_year_top{sufix}.png")
        else:
            ax.set_ylabel('number of published works')
            ax.set_title('papers published by IATE')
            fout = (f"{self.config.dir_plot}/"
                    f"papers_per_year_all{sufix}.png")
        fig.savefig(fout)
        plt.close()

    def cumulative_per_author(self, top=False, normalize_first=False):
        '''
        Plot the cumulative number of works per author over time.

        Parameters
        ----------
        top: bool
            Use all works or papers from selected journals
        normalize_first: bool
            Normalize to the year of the first publication
        '''
        import datetime
        now = datetime.datetime.now()
        current_year = now.year
        if normalize_first:
            # Placeholder edges; recomputed per author inside the loop.
            tedges = np.arange(-0.5, 20.5, 1)
            tmeans = np.arange(0, 20, 1)
            fout = (f"{self.config.dir_plot}/papers_by_author_zero.png")
            titlen = 'normalized to first'
            xlab = 'years from first publication'
        else:
            tedges = np.arange(1995, 2021, 1)
            tmeans = np.arange(1995, 2020, 1)
            fout = (f"{self.config.dir_plot}/papers_by_author_year.png")
            titlen = ''
            xlab = 'year'
        if top:
            df = self.inst.pub_auth_top
            titlet = 'papers'
        else:
            df = self.inst.pub_auth_all
            titlet = 'publications'
        fig = plt.figure(figsize=(14, 7))
        ax = fig.add_subplot()
        cycling_attrs()
        y_max = 0
        auth_names = list(df.author1.unique())
        for a in auth_names:
            d = df[df['author1'].isin([a])]
            y = [int(i) for i in d.year.values]
            if len(y) == 0:
                continue
            y = np.array(y)
            if normalize_first:
                # Shift each author's years so their first paper is 0.
                active = current_year - min(y) + 1
                y = y - min(y)
                tedges = np.arange(-0.5, active + 0.5, 1)
                tmeans = np.arange(0, active, 1)
            H = np.histogram(y, bins=tedges)
            ac = H[0].cumsum()
            y_max = max(y_max, max(ac))
            aesthetics = aes_attrs()
            ax.plot(tmeans, ac, label=a, **aesthetics)
        title = f'Cumulative {titlet} by IATE researchers {titlen}'
        ax.set_title(title)
        ax.set_xlabel(xlab)
        ax.set_ylabel('cumulative number')
        ax.legend(loc=2, ncol=2, fontsize='small', frameon=False,
                  handlelength=6)
        fig.savefig(fout)
        plt.close()

    def authors_citations_years(self, top=True):
        '''
        Plot a scatter of number of authors and number of citations

        Parameters
        ----------
        top: bool
            Use all works or papers from selected journals
        '''
        if top:
            df = self.inst.pub_inst_top
        else:
            df = self.inst.pub_inst_all
        npapers = df.shape[0]
        na = []
        nc = []
        ye = []
        for i in range(npapers):
            pprs = df.iloc[i]
            nauths = len(pprs.authors)
            ncitas = pprs.citation_count
            year = pprs.year
            # Small random jitter in [-0.3, 0.3) so overlapping integer
            # points remain visible in the scatter.
            r = random.random()*0.6 - 0.3
            na.append(nauths+r)
            r = random.random()*0.6 - 0.3
            nc.append(ncitas+1+r)
            ye.append(int(year))
        # Marker size grows with publication year (newer -> bigger).
        y = ((np.array(ye)-1980)*0.2)**2.6
        fig = plt.figure(figsize=(10, 5))
        ax = fig.add_subplot()
        ax.scatter(na, nc, s=y, color=(0, 0, 1, 0.3))
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_xlabel('Number of authors')
        ax.set_ylabel('Number of citations + 1')
        ax.legend(loc='center left', bbox_to_anchor=(1.1, 0.5), labelspacing=3)
        fout = (f"{self.config.dir_plot}/nauth_ncitas_year.png")
        fig.savefig(fout)
        plt.close()

    def top_proceedings(self):
        '''
        Plot a scatter of number of publications vs number of papers,
        one point per first author, with the identity line for
        reference.
        '''
        tod = []
        top = []
        auth_names = list(self.inst.pub_inst_all.author1.unique())
        for a in auth_names:
            df = self.inst.pub_inst_all
            dfa = df[df['author1'].isin([a])]
            df = self.inst.pub_inst_top
            dft = df[df['author1'].isin([a])]
            tod.append(dfa.shape[0])
            top.append(dft.shape[0])
        fig = plt.figure(figsize=(10, 5))
        ax = fig.add_subplot()
        ax.scatter(tod, top)
        m = max(tod)
        # Identity line: points below it have fewer top papers than
        # total works.
        ax.plot([0, m], [0, m])
        ax.set_title('all works vs. top papers')
        ax.set_xlabel('all works')
        ax.set_ylabel('papers top')
        fout = (f"{self.config.dir_plot}/top_vs_all.png")
        fig.savefig(fout)
        plt.close()

    def number_authors(self, top=True):
        '''
        Plot a scatter for the number of authors as a function of time

        Parameters
        ----------
        top: bool
            Use all works or papers from selected journals
        '''
        if top:
            df = self.inst.pub_inst_top
        else:
            df = self.inst.pub_inst_all
        nauth = []
        for i, p in df.iterrows():
            nauth.append(len(p.authors))
        fig = plt.figure(figsize=(10, 5))
        ax = fig.add_subplot()
        years = [int(y) for y in df.year.values]
        ax.scatter(years, nauth)
        ax.set_yscale('log')
        ax.set_title('number of authors per year')
        ax.set_xlabel('year')
        ax.set_ylabel('N authors')
        fout = (f"{self.config.dir_plot}/year_nauth.png")
        fig.savefig(fout)
        plt.close()

    def nauth_npprs(self, top=True):
        '''
        Plot the number of active authors and the number of papers per
        year on the same axes.

        Parameters
        ----------
        top: bool
            Use papers from selected journals (True) or all works
            (False).
        '''
        fig = plt.figure(figsize=(10, 5))
        ax = fig.add_subplot()
        x = list(self.inst.history.index)
        y = self.inst.history['pop']
        if top:
            z = self.inst.history['npapers_top']
        else:
            z = self.inst.history['npapers_all']
        ax.plot(x, y, label='authors')
        ax.plot(x, z, label='papers')
        ax.legend()
        ax.set_title('number of authors per paper')
        ax.set_xlabel('year')
        ax.set_ylabel('N authors / paper')
        # NOTE(review): the title set above is overwritten by the
        # branch below.
        if top:
            ax.set_title('publications by IATE, top papers')
            fout = (f"{self.config.dir_plot}/nauth_npprs_years_top.png")
        else:
            ax.set_title('papers published by IATE, all works')
            fout = (f"{self.config.dir_plot}/nauth_npprs_years_all.png")
        fig.savefig(fout)
        plt.close()

    def plot_all(self):
        '''
        Make all the plots.
        '''
        self.papers_histogram2(top=True)
        self.papers_histogram2(top=False)
        self.papers_histogram2(top=True, per_auth=True)
        self.papers_histogram2(top=False, per_auth=True)
        self.cumulative_per_author(top=False, normalize_first=False)
        self.cumulative_per_author(top=False, normalize_first=True)
        self.cumulative_per_author(top=True, normalize_first=False)
        self.cumulative_per_author(top=True, normalize_first=True)
        self.authors_citations_years()
        self.top_proceedings()
        self.nauth_npprs()
| [
"numpy.histogram",
"pinnacle.plot_styles.cycling_attrs",
"matplotlib.pyplot.close",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"numpy.array",
"pinnacle.plot_styles.aes_attrs",
"random.random",
"numpy.arange"
] | [((2049, 2075), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (2059, 2075), True, 'from matplotlib import pyplot as plt\n'), ((2733, 2744), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2742, 2744), True, 'from matplotlib import pyplot as plt\n'), ((3678, 3704), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (3688, 3704), True, 'from matplotlib import pyplot as plt\n'), ((4344, 4355), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4353, 4355), True, 'from matplotlib import pyplot as plt\n'), ((4699, 4722), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4720, 4722), False, 'import datetime\n'), ((5458, 5485), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (5468, 5485), True, 'from matplotlib import pyplot as plt\n'), ((5525, 5540), 'pinnacle.plot_styles.cycling_attrs', 'cycling_attrs', ([], {}), '()\n', (5538, 5540), False, 'from pinnacle.plot_styles import cycling_attrs, aes_attrs\n'), ((6544, 6555), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6553, 6555), True, 'from matplotlib import pyplot as plt\n'), ((7423, 7450), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (7433, 7450), True, 'from matplotlib import pyplot as plt\n'), ((7867, 7878), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7876, 7878), True, 'from matplotlib import pyplot as plt\n'), ((8399, 8426), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (8409, 8426), True, 'from matplotlib import pyplot as plt\n'), ((8754, 8765), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8763, 8765), True, 'from matplotlib import pyplot as plt\n'), ((9242, 9269), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (9252, 9269), True, 'from matplotlib import 
pyplot as plt\n'), ((9624, 9635), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9633, 9635), True, 'from matplotlib import pyplot as plt\n'), ((9688, 9715), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (9698, 9715), True, 'from matplotlib import pyplot as plt\n'), ((10516, 10527), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10525, 10527), True, 'from matplotlib import pyplot as plt\n'), ((3609, 3638), 'numpy.histogram', 'np.histogram', (['y'], {'bins': 'tbreaks'}), '(y, bins=tbreaks)\n', (3621, 3638), True, 'import numpy as np\n'), ((4805, 4829), 'numpy.arange', 'np.arange', (['(-0.5)', '(20.5)', '(1)'], {}), '(-0.5, 20.5, 1)\n', (4814, 4829), True, 'import numpy as np\n'), ((4851, 4870), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(1)'], {}), '(0, 20, 1)\n', (4860, 4870), True, 'import numpy as np\n'), ((5072, 5096), 'numpy.arange', 'np.arange', (['(1995)', '(2021)', '(1)'], {}), '(1995, 2021, 1)\n', (5081, 5096), True, 'import numpy as np\n'), ((5118, 5142), 'numpy.arange', 'np.arange', (['(1995)', '(2020)', '(1)'], {}), '(1995, 2020, 1)\n', (5127, 5142), True, 'import numpy as np\n'), ((5799, 5810), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5807, 5810), True, 'import numpy as np\n'), ((6049, 6077), 'numpy.histogram', 'np.histogram', (['y'], {'bins': 'tedges'}), '(y, bins=tedges)\n', (6061, 6077), True, 'import numpy as np\n'), ((6175, 6186), 'pinnacle.plot_styles.aes_attrs', 'aes_attrs', ([], {}), '()\n', (6184, 6186), False, 'from pinnacle.plot_styles import cycling_attrs, aes_attrs\n'), ((5950, 5982), 'numpy.arange', 'np.arange', (['(-0.5)', '(active + 0.5)', '(1)'], {}), '(-0.5, active + 0.5, 1)\n', (5959, 5982), True, 'import numpy as np\n'), ((6008, 6031), 'numpy.arange', 'np.arange', (['(0)', 'active', '(1)'], {}), '(0, active, 1)\n', (6017, 6031), True, 'import numpy as np\n'), ((7197, 7212), 'random.random', 'random.random', ([], {}), '()\n', (7210, 7212), False, 
'import random\n'), ((7271, 7286), 'random.random', 'random.random', ([], {}), '()\n', (7284, 7286), False, 'import random\n'), ((7379, 7391), 'numpy.array', 'np.array', (['ye'], {}), '(ye)\n', (7387, 7391), True, 'import numpy as np\n')] |
import numpy as np
def Topsis(weights, numerical_data, impact):
    """Rank alternatives with TOPSIS and return the index of the best row.

    Args:
        weights: 1-D array of criterion weights, one per column of
            ``numerical_data``; normalized internally to sum to 1.
        numerical_data: 2-D float array (alternatives x criteria).
            NOTE: modified in place by the normalization step.
        impact: 1-D array of "+" / "-" flags per criterion; "+" means a
            higher value is better, "-" means lower is better.

    Returns:
        Index (int) of the best-performing row, or None if the input
        shapes are inconsistent (a message is printed in that case).
    """
    # Validate that every criterion has exactly one weight and one impact sign.
    # (The original raised its own exception only to catch it immediately;
    # the net behavior - print and return None - is preserved directly.)
    if (numerical_data.shape[1] != weights.shape[0]
            or weights.shape != impact.shape
            or numerical_data.shape[1] != impact.shape[0]):
        print("Given input is incorrect")
        return
    # Convert the weight vector into percent (fractional) form.
    weights = weights / weights.sum()
    # Vector-normalize each criterion column.
    for i in range(numerical_data.shape[1]):
        numerical_data[:, i] = numerical_data[:, i] / np.sqrt((numerical_data[:, i] ** 2).sum())
    # Scale every column by its weight.
    numerical_data = numerical_data * (weights.reshape(1, numerical_data.shape[1]))
    ideal_best_values = []
    ideal_worst_values = []
    for i in range(numerical_data.shape[1]):
        if impact[i] == "+":
            # Benefit criterion: larger values are better.
            ideal_best_values.append(numerical_data[:, i].max())
            ideal_worst_values.append(numerical_data[:, i].min())
        elif impact[i] == "-":
            # Cost criterion: smaller values are better.
            ideal_best_values.append(numerical_data[:, i].min())
            ideal_worst_values.append(numerical_data[:, i].max())
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the alias
    # pointed at the builtin float, so dtype=float is an exact replacement.
    ideal_best_values = np.array(ideal_best_values, dtype=float)
    ideal_worst_values = np.array(ideal_worst_values, dtype=float)
    # Euclidean distance of each row to the ideal best / worst points.
    euclDist_ideal_best = np.sqrt(((numerical_data - ideal_best_values) ** 2).sum(axis=1))
    euclDist_ideal_worst = np.sqrt(((numerical_data - ideal_worst_values) ** 2).sum(axis=1))
    # Relative closeness to the ideal solution; 1 is best, 0 is worst.
    performance_score = euclDist_ideal_worst / (euclDist_ideal_best + euclDist_ideal_worst)
    # (Removed an unused np.argsort ranking that was computed and discarded.)
    return np.argmax(performance_score)  # index of the row with the top score
| [
"numpy.argsort",
"numpy.array",
"numpy.argmax"
] | [((1182, 1225), 'numpy.array', 'np.array', (['ideal_best_values'], {'dtype': 'np.float'}), '(ideal_best_values, dtype=np.float)\n', (1190, 1225), True, 'import numpy as np\n'), ((1250, 1294), 'numpy.array', 'np.array', (['ideal_worst_values'], {'dtype': 'np.float'}), '(ideal_worst_values, dtype=np.float)\n', (1258, 1294), True, 'import numpy as np\n'), ((1575, 1604), 'numpy.argsort', 'np.argsort', (['performance_score'], {}), '(performance_score)\n', (1585, 1604), True, 'import numpy as np\n'), ((1613, 1641), 'numpy.argmax', 'np.argmax', (['performance_score'], {}), '(performance_score)\n', (1622, 1641), True, 'import numpy as np\n')] |
from pyspark.sql.types import (
ArrayType,
IntegerType,
StringType,
StructField,
StructType,
)
from butterfree.extract.pre_processing import explode_json_column
from butterfree.testing.dataframe import (
assert_dataframe_equality,
create_df_from_collection,
)
def test_explode_json_column(spark_context, spark_session):
    """explode_json_column should add one typed column per key of the JSON payload."""
    # arrange: one row whose JSON string carries an int, a str, a numeric
    # string (coerced to int by the schema) and an int array.
    raw_json = '{"a": 123, "b": "abc", "c": "123", "d": [1, 2, 3]}'
    input_df = create_df_from_collection(
        [{"json_column": raw_json}], spark_context, spark_session
    )
    expected_rows = [
        {
            "json_column": raw_json,
            "a": 123,
            "b": "abc",
            "c": 123,
            "d": [1, 2, 3],
        }
    ]
    expected_df = create_df_from_collection(expected_rows, spark_context, spark_session)
    # Schema used to parse the JSON string into typed columns.
    field_specs = [
        ("a", IntegerType()),
        ("b", StringType()),
        ("c", IntegerType()),
        ("d", ArrayType(IntegerType())),
    ]
    json_column_schema = StructType(
        [StructField(name, dtype) for name, dtype in field_specs]
    )
    # act
    output_df = explode_json_column(input_df, "json_column", json_column_schema)
    # assert
    assert_dataframe_equality(expected_df, output_df)
| [
"butterfree.testing.dataframe.create_df_from_collection",
"pyspark.sql.types.IntegerType",
"butterfree.testing.dataframe.assert_dataframe_equality",
"butterfree.extract.pre_processing.explode_json_column",
"pyspark.sql.types.StringType"
] | [((693, 760), 'butterfree.testing.dataframe.create_df_from_collection', 'create_df_from_collection', (['input_data', 'spark_context', 'spark_session'], {}), '(input_data, spark_context, spark_session)\n', (718, 760), False, 'from butterfree.testing.dataframe import assert_dataframe_equality, create_df_from_collection\n'), ((777, 845), 'butterfree.testing.dataframe.create_df_from_collection', 'create_df_from_collection', (['target_data', 'spark_context', 'spark_session'], {}), '(target_data, spark_context, spark_session)\n', (802, 845), False, 'from butterfree.testing.dataframe import assert_dataframe_equality, create_df_from_collection\n'), ((1127, 1191), 'butterfree.extract.pre_processing.explode_json_column', 'explode_json_column', (['input_df', '"""json_column"""', 'json_column_schema'], {}), "(input_df, 'json_column', json_column_schema)\n", (1146, 1191), False, 'from butterfree.extract.pre_processing import explode_json_column\n'), ((1211, 1258), 'butterfree.testing.dataframe.assert_dataframe_equality', 'assert_dataframe_equality', (['target_df', 'output_df'], {}), '(target_df, output_df)\n', (1236, 1258), False, 'from butterfree.testing.dataframe import assert_dataframe_equality, create_df_from_collection\n'), ((923, 936), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (934, 936), False, 'from pyspark.sql.types import ArrayType, IntegerType, StringType, StructField, StructType\n'), ((968, 980), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (978, 980), False, 'from pyspark.sql.types import ArrayType, IntegerType, StringType, StructField, StructType\n'), ((1012, 1025), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (1023, 1025), False, 'from pyspark.sql.types import ArrayType, IntegerType, StringType, StructField, StructType\n'), ((1067, 1080), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (1078, 1080), False, 'from pyspark.sql.types import ArrayType, IntegerType, StringType, 
StructField, StructType\n')] |
'''
runs in python 2.7
This program converts topocentric data to geocentric data.
'''
from PySide.QtGui import QApplication,QWidget,QComboBox,QVBoxLayout,QLabel,QFormLayout
import os
import csv
import sys
# Module-level scratch state shared by the functions below.
topo=[]  # raw topocentric samples - not referenced by the visible code paths
sp=[]  # satellite position buffer (also shadowed by locals in readFromFile)
wp=[]  # workstation position buffer (also shadowed by locals in readFromFile)
geo=[]  # accumulated geocentric rows (time, x, y, z); appended to by readFromFile
i=0  # shared progress counter
filename1=""  # topocentric data file name (.csv); selected via the GUI
filename2=""  # workstation position file name (.txt or .csv); selected via the GUI
def writeToFile(filename3, geo):
    '''
    Write geocentric coordinate rows to a .csv file, preceded by a header row.
    args:
        filename3(String): name of data file where data is to be stored
        geo(list): data to be stored in format (time, x, y, z)
    '''
    header = ["time", "x", "y", "z"]
    with open(filename3, 'w') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(header)
        writer.writerows(geo)
def readFromFile(filename1,filename2):
    '''
    Read satellite topocentric positions and workstation positions, convert
    the topocentric coordinate system to the geocentric system, and write the
    result to "g"+filename1 via writeToFile.
    args:
        filename1(String): topocentric position data file (.csv with
                           tab-separated rows of time, x, y, z)
        filename2(String): workstation position file - .txt if a single fixed
                           workstation is concerned, .csv if one workstation
                           position per sample is concerned.
    '''
    filename3="g"+filename1
    if(".txt" in filename2):
        # Single fixed workstation position applied to every satellite sample.
        # (Use a context manager so the file handle is not leaked.)
        with open(filename2,"r") as fp:
            line=fp.read()
        wp=map(float,line.split())
        i=0
        workstationPosition=list(wp)
        with open(filename1, 'r') as csvfile:
            csvreader = csv.reader(csvfile)
            for row in csvreader:
                gs=[]
                i=i+1
                sp=map(float,row[0].split("\t"))
                satellitePosition=list(sp)
                # Row format: time, x, y, z - time is passed through, the
                # position is offset by the workstation position.
                gs.append(satellitePosition[0])
                gs.append(satellitePosition[1]+workstationPosition[0])
                gs.append(satellitePosition[2]+workstationPosition[1])
                gs.append(satellitePosition[3]+workstationPosition[2])
                geo.append(gs)
        writeToFile(filename3,geo)
        print("Added ",i," data successfully")
    if(".csv" in filename2):
        with open(filename1,'r') as fp1:
            cr1=csv.reader(fp1)
            sp=[]
            for r1 in cr1:
                sp.append(list(map(float,r1[0].split("\t"))))
        # BUG FIX: the workstation positions must be read from filename2.
        # The original opened filename1 a second time here, so both lists
        # held the satellite data and the length check could never trip.
        with open(filename2,'r') as fp2:
            cr2=csv.reader(fp2)
            wp=[]
            for r2 in cr2:
                wp.append(list(map(float,r2[0].split("\t"))))
        if(len(wp)!=len(sp)):
            print(".csv files have different lengths")
        else:
            for i in range(len(wp)):
                gs=[]
                gs.append(sp[i][0])
                gs.append(sp[i][1]+wp[i][0])
                gs.append(sp[i][2]+wp[i][1])
                gs.append(sp[i][3]+wp[i][2])
                geo.append(gs)
            writeToFile(filename3,geo)
            print("Added ",i,"Data Successfully")
class SearchFile(QWidget):
    """File-picker window: two combo boxes listing the .txt/.csv files of the
    current directory, used to select the satellite data file and the
    workstation position file."""

    def __init__(self):
        'Builds the GUI and populates both file combo boxes.'
        QWidget.__init__(self)
        self.setWindowTitle("Search window")
        self.fn1=""  # selected satellite data file name
        self.fn2=""  # selected workstation position file name
        L1=[]
        st=os.getcwd()
        L=os.listdir(st)
        # Only offer .txt and .csv files as candidates.
        for filenames in L:
            if(".txt" in filenames or ".csv" in filenames):
                L1.append(filenames)
        self.files1=QComboBox()
        self.files1.addItems(L1)
        self.files1.setCurrentIndex(-1)
        # FIX: the original constructed self.files2 twice; the first instance
        # was immediately discarded, so it is created only once here.
        self.files2=QComboBox()
        self.files2.addItems(L1)
        self.files2.setCurrentIndex(-1)
        self.files1.currentIndexChanged.connect(lambda:self.returnString(self.files1))
        self.files2.currentIndexChanged.connect(lambda:self.returnString(self.files2))
        self.setUpUI()

    def setUpUI(self):
        'Lays out the two labelled combo boxes in a vertical form.'
        layout=QVBoxLayout()
        form=QFormLayout()
        form.addRow(QLabel("Satellite Data File"),self.files1)
        form.addRow(QLabel("Work Station Position DataFile"),self.files2)
        layout.addLayout(form)
        self.setLayout(layout)

    def returnString(self,files):
        'Stores the current text of whichever combo box changed into fn1/fn2.'
        if(files==self.files1):
            self.fn1=str(self.files1.currentText())
        else:
            self.fn2=str(self.files2.currentText())
# Application entry point: build the Qt application and show the file picker.
app=QApplication(sys.argv)
sf=SearchFile()
sf.show()
# Block until the window is closed, then convert whichever files were chosen.
(app.exec_())
readFromFile(sf.fn1,sf.fn2)
| [
"os.listdir",
"PySide.QtGui.QFormLayout",
"PySide.QtGui.QLabel",
"csv.writer",
"PySide.QtGui.QComboBox",
"os.getcwd",
"PySide.QtGui.QVBoxLayout",
"PySide.QtGui.QApplication",
"csv.reader",
"PySide.QtGui.QWidget.__init__"
] | [((4426, 4448), 'PySide.QtGui.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (4438, 4448), False, 'from PySide.QtGui import QApplication, QWidget, QComboBox, QVBoxLayout, QLabel, QFormLayout\n'), ((595, 617), 'csv.writer', 'csv.writer', (['filewriter'], {}), '(filewriter)\n', (605, 617), False, 'import csv\n'), ((3040, 3062), 'PySide.QtGui.QWidget.__init__', 'QWidget.__init__', (['self'], {}), '(self)\n', (3056, 3062), False, 'from PySide.QtGui import QApplication, QWidget, QComboBox, QVBoxLayout, QLabel, QFormLayout\n'), ((3173, 3184), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3182, 3184), False, 'import os\n'), ((3195, 3209), 'os.listdir', 'os.listdir', (['st'], {}), '(st)\n', (3205, 3209), False, 'import os\n'), ((3355, 3366), 'PySide.QtGui.QComboBox', 'QComboBox', ([], {}), '()\n', (3364, 3366), False, 'from PySide.QtGui import QApplication, QWidget, QComboBox, QVBoxLayout, QLabel, QFormLayout\n'), ((3388, 3399), 'PySide.QtGui.QComboBox', 'QComboBox', ([], {}), '()\n', (3397, 3399), False, 'from PySide.QtGui import QApplication, QWidget, QComboBox, QVBoxLayout, QLabel, QFormLayout\n'), ((3600, 3611), 'PySide.QtGui.QComboBox', 'QComboBox', ([], {}), '()\n', (3609, 3611), False, 'from PySide.QtGui import QApplication, QWidget, QComboBox, QVBoxLayout, QLabel, QFormLayout\n'), ((3945, 3958), 'PySide.QtGui.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (3956, 3958), False, 'from PySide.QtGui import QApplication, QWidget, QComboBox, QVBoxLayout, QLabel, QFormLayout\n'), ((3972, 3985), 'PySide.QtGui.QFormLayout', 'QFormLayout', ([], {}), '()\n', (3983, 3985), False, 'from PySide.QtGui import QApplication, QWidget, QComboBox, QVBoxLayout, QLabel, QFormLayout\n'), ((1378, 1397), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (1388, 1397), False, 'import csv\n'), ((2112, 2127), 'csv.reader', 'csv.reader', (['fp1'], {}), '(fp1)\n', (2122, 2127), False, 'import csv\n'), ((2322, 2337), 'csv.reader', 'csv.reader', (['fp2'], {}), 
'(fp2)\n', (2332, 2337), False, 'import csv\n'), ((4006, 4035), 'PySide.QtGui.QLabel', 'QLabel', (['"""Satellite Data File"""'], {}), "('Satellite Data File')\n", (4012, 4035), False, 'from PySide.QtGui import QApplication, QWidget, QComboBox, QVBoxLayout, QLabel, QFormLayout\n'), ((4071, 4111), 'PySide.QtGui.QLabel', 'QLabel', (['"""Work Station Position DataFile"""'], {}), "('Work Station Position DataFile')\n", (4077, 4111), False, 'from PySide.QtGui import QApplication, QWidget, QComboBox, QVBoxLayout, QLabel, QFormLayout\n')] |
#!python3
r""" combobox.py
https://tkdocs.com/tutorial/widgets.html#combobox
"""
from tkinter import *
from tkinter import ttk
# Root window plus one Tk variable of each type; each starts from an explicit
# non-default value so the bound combobox shows a selection immediately.
root = Tk()
root.title("Combobox")
s = StringVar(value="On")  # default value is ''
b = BooleanVar(value=True)  # default is False
i = IntVar(value=10)  # default is 0
d = DoubleVar(value=10.5)  # default is 0.0
def comboboxSelected(*args):
    """Report every bound variable's current value and clear the selection
    highlight on each combobox after a selection event."""
    print("comboboxSelected({})".format(args))
    for label, var, combo in (("s", s, cs), ("b", b, cb), ("i", i, ci), ("d", d, cd)):
        print(" {} = {}".format(label, var.get()))
        combo.selection_clear()
# One combobox per variable type. Selecting an entry writes it back into the
# bound Tk variable and fires the shared <<ComboboxSelected>> handler above.
cs = ttk.Combobox(root, textvariable=s)
cs.bind('<<ComboboxSelected>>', comboboxSelected)
cs['values'] = ('On', 'Off')
cs.grid()
cb = ttk.Combobox(root, textvariable=b)
cb.bind('<<ComboboxSelected>>', comboboxSelected)
cb['values'] = (True, False)
cb.grid()
ci = ttk.Combobox(root, textvariable=i)
ci.bind('<<ComboboxSelected>>', comboboxSelected)
ci['values'] = (10, 1)
ci.grid()
cd = ttk.Combobox(root, textvariable=d)
cd.bind('<<ComboboxSelected>>', comboboxSelected)
cd['values'] = (10.5, 2.3)
cd.grid()
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
| [
"tkinter.ttk.Combobox"
] | [((690, 724), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['root'], {'textvariable': 's'}), '(root, textvariable=s)\n', (702, 724), False, 'from tkinter import ttk\n'), ((820, 854), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['root'], {'textvariable': 'b'}), '(root, textvariable=b)\n', (832, 854), False, 'from tkinter import ttk\n'), ((950, 984), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['root'], {'textvariable': 'i'}), '(root, textvariable=i)\n', (962, 984), False, 'from tkinter import ttk\n'), ((1074, 1108), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['root'], {'textvariable': 'd'}), '(root, textvariable=d)\n', (1086, 1108), False, 'from tkinter import ttk\n')] |
#!/usr/bin/env python3
# Copyright (c) 2021 Johns Hopkins University (authors: <NAME>)
# Apache 2.0
import argparse
import os
import subprocess
import sys
from contextlib import contextmanager
from pathlib import Path
import torch
from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer, combine
from lhotse.recipes import download_ami, prepare_ami
# Torch's multithreaded behavior needs to be disabled or it wastes a lot of CPU and
# slow things down. Do this outside of main() in case it needs to take effect
# even when we are not invoking the main (e.g. when spawning subprocesses).
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
@contextmanager
def get_executor():
    """Yield a distributed worker pool on the CLSP grid, else ``None``.

    Yielding ``None`` tells ``compute_and_store_features`` to instantiate
    its own local pool.  This has to be a context manager because we may
    open multiple nested "with" clauses inside, and this way the cluster
    resources are freed at the right time.
    """
    try:
        # If this is executed on the CLSP grid, we will try to use the
        # Grid Engine to distribute the tasks.
        # Other clusters can also benefit from that, provided a cluster-specific wrapper.
        # (see https://github.com/pzelasko/plz for reference)
        #
        # The following must be installed:
        # $ pip install dask distributed
        # $ pip install git+https://github.com/pzelasko/plz
        name = subprocess.check_output("hostname -f", shell=True, text=True)
        if name.strip().endswith(".clsp.jhu.edu"):
            import plz
            from distributed import Client

            with plz.setup_cluster(memory="6G") as cluster:
                cluster.scale(80)
                yield Client(cluster)
            return
    except Exception:
        # Best-effort: any failure above (no hostname binary, plz missing,
        # cluster unreachable) just means we fall back to the local pool.
        # A bare ``except:`` here would also swallow KeyboardInterrupt and
        # SystemExit, which must propagate.
        pass
    # No need to return anything - compute_and_store_features
    # will just instantiate the pool itself.
    yield None
def locate_corpus(*corpus_dirs):
    """Return the first existing path among ``corpus_dirs``.

    If no candidate directory exists, print instructions and terminate the
    process with exit code 1.
    """
    for d in corpus_dirs:
        if os.path.exists(d):
            return d
    # Fixed copy-paste error: this recipe prepares AMI, not Librispeech.
    print(
        "Please create a place on your system to put the downloaded AMI data "
        "and add it to `corpus_dirs`"
    )
    sys.exit(1)
def get_parser():
    """Build the CLI parser: ``--num-jobs`` defaults to min(15, CPU count)."""
    default_jobs = min(15, os.cpu_count())
    argp = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    argp.add_argument("--num-jobs", type=int, default=default_jobs)
    return argp
def main():
    """Prepare AMI SDM manifests and extract 80-dim fbank features per split."""
    cli_args = get_parser().parse_args()
    corpus_path = locate_corpus(Path("/export/corpora5/AMI/amicorpus"))
    annotations_path = Path("/export/c07/draj")
    download_ami(corpus_path, annotations_dir=annotations_path, mic="sdm")
    output_dir = Path("exp/data")
    print("AMI manifest preparation:")
    manifests_by_split = prepare_ami(
        corpus_path,
        annotations_dir=annotations_path,
        output_dir=output_dir,
        mic="sdm",
        partition="full-corpus",
        max_pause=0,
    )
    print("Feature extraction:")
    fbank = Fbank(FbankConfig(num_mel_bins=80))
    # Initialize the executor only once and reuse it for every split.
    with get_executor() as ex:
        for split, split_manifests in manifests_by_split.items():
            manifest_path = output_dir / f"cuts_{split}.json.gz"
            if manifest_path.is_file():
                print(f"{split} already exists - skipping.")
                continue
            print("Processing", split)
            windows = CutSet.from_manifests(
                recordings=split_manifests["recordings"],
                supervisions=split_manifests["supervisions"],
            ).cut_into_windows(duration=5)
            cuts = windows.compute_and_store_features(
                extractor=fbank,
                storage_path=f"{output_dir}/feats_{split}",
                # when an executor is specified, make more partitions
                num_jobs=cli_args.num_jobs if ex is None else min(80, len(windows)),
                executor=ex,
                storage_type=LilcomHdf5Writer,
            ).pad(duration=5.0)
            cuts.to_json(manifest_path)
# Standard script entry point.
if __name__ == "__main__":
    main()
| [
"subprocess.check_output",
"os.path.exists",
"argparse.ArgumentParser",
"pathlib.Path",
"lhotse.recipes.prepare_ami",
"lhotse.recipes.download_ami",
"torch.set_num_threads",
"lhotse.FbankConfig",
"lhotse.CutSet.from_manifests",
"os.cpu_count",
"sys.exit",
"distributed.Client",
"plz.setup_clu... | [((602, 626), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (623, 626), False, 'import torch\n'), ((627, 659), 'torch.set_num_interop_threads', 'torch.set_num_interop_threads', (['(1)'], {}), '(1)\n', (656, 659), False, 'import torch\n'), ((2155, 2166), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2163, 2166), False, 'import sys\n'), ((2200, 2279), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (2223, 2279), False, 'import argparse\n'), ((2553, 2577), 'pathlib.Path', 'Path', (['"""/export/c07/draj"""'], {}), "('/export/c07/draj')\n", (2557, 2577), False, 'from pathlib import Path\n'), ((2583, 2651), 'lhotse.recipes.download_ami', 'download_ami', (['corpus_dir'], {'annotations_dir': 'annotations_dir', 'mic': '"""sdm"""'}), "(corpus_dir, annotations_dir=annotations_dir, mic='sdm')\n", (2595, 2651), False, 'from lhotse.recipes import download_ami, prepare_ami\n'), ((2670, 2686), 'pathlib.Path', 'Path', (['"""exp/data"""'], {}), "('exp/data')\n", (2674, 2686), False, 'from pathlib import Path\n'), ((2747, 2880), 'lhotse.recipes.prepare_ami', 'prepare_ami', (['corpus_dir'], {'annotations_dir': 'annotations_dir', 'output_dir': 'output_dir', 'mic': '"""sdm"""', 'partition': '"""full-corpus"""', 'max_pause': '(0)'}), "(corpus_dir, annotations_dir=annotations_dir, output_dir=\n output_dir, mic='sdm', partition='full-corpus', max_pause=0)\n", (2758, 2880), False, 'from lhotse.recipes import download_ami, prepare_ami\n'), ((1419, 1480), 'subprocess.check_output', 'subprocess.check_output', (['"""hostname -f"""'], {'shell': '(True)', 'text': '(True)'}), "('hostname -f', shell=True, text=True)\n", (1442, 1480), False, 'import subprocess\n'), ((1969, 1986), 'os.path.exists', 'os.path.exists', (['d'], {}), '(d)\n', (1983, 1986), False, 'import os\n'), ((2485, 2523), 'pathlib.Path', 'Path', 
(['"""/export/corpora5/AMI/amicorpus"""'], {}), "('/export/corpora5/AMI/amicorpus')\n", (2489, 2523), False, 'from pathlib import Path\n'), ((2987, 3015), 'lhotse.FbankConfig', 'FbankConfig', ([], {'num_mel_bins': '(80)'}), '(num_mel_bins=80)\n', (2998, 3015), False, 'from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer, combine\n'), ((1616, 1646), 'plz.setup_cluster', 'plz.setup_cluster', ([], {'memory': '"""6G"""'}), "(memory='6G')\n", (1633, 1646), False, 'import plz\n'), ((2358, 2372), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (2370, 2372), False, 'import os\n'), ((1715, 1730), 'distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (1721, 1730), False, 'from distributed import Client\n'), ((3369, 3471), 'lhotse.CutSet.from_manifests', 'CutSet.from_manifests', ([], {'recordings': "manifests['recordings']", 'supervisions': "manifests['supervisions']"}), "(recordings=manifests['recordings'], supervisions=\n manifests['supervisions'])\n", (3390, 3471), False, 'from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer, combine\n')] |
from setuptools import setup, find_packages
# TBD: use_scm_version = True/ setup_requires=["setuptools_scm"]
# The long description shown on PyPI is taken verbatim from the README.
with open("README.md", "r", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="jetconf_mnat",
    author="<NAME>",
    author_email="<EMAIL>",
    description="MNAT server jetconf backend",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/GrumpyOldTroll/mnat/server",
    packages=find_packages(),
    install_requires=["jetconf"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Telecommunications Industry",
    ],
    # Ship the YANG library JSON alongside the package code.
    package_data={
        "": ["yang-library-data.json"]
    },
)
| [
"setuptools.find_packages"
] | [((498, 513), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (511, 513), False, 'from setuptools import setup, find_packages\n')] |
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import torchvision.models as models
from torchvision.io import read_image
from torchvision import datasets, models, transforms
import matplotlib
import matplotlib.pyplot as plt
import time
import os
import copy
import csv
import pandas as pd
import numpy as np
from skimage import io, transform
from pynput.mouse import Listener as MouseListener
from pynput.mouse import Button, Controller
from pynput.keyboard import Listener as KeyboardListener
#from pynput.keyboard import Key, Controller
import PySimpleGUI as sg
import threading
from PIL import ImageGrab, Image, ImageDraw
import recommender
class GUI(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.model=torch.load('model\\test_model100.m',map_location=torch.device('cpu'))
self.img_path='data\grab1.png'
self.img_dealer_path='data\grab_dealer.png'
self.img_player_path='data\grab_player.png'
self.model.eval()
self.list=[]
self.hit=[sg.Text('x'),sg.In(size=(5,1),enable_events=True,key='hit_x'),sg.Text('y'),sg.In(size=(5,1),enable_events=True,key='hit_y'),sg.Radio(text='hit',group_id='check',enable_events=False,key='hit_check')]
self.stand=[sg.Text('x'),sg.In(size=(5,1),enable_events=True,key='stand_x'),sg.Text('y'),sg.In(size=(5,1),enable_events=True,key='stand_y'),sg.Radio(text='stand',group_id='check',enable_events=False,key='stand_check')]
self.split=[sg.Text('x'),sg.In(size=(5,1),enable_events=True,key='split_x'),sg.Text('y'),sg.In(size=(5,1),enable_events=True,key='split_y'),sg.Radio(text='split',group_id='check',enable_events=False,key='split_check')]
self.double=[sg.Text('x'),sg.In(size=(5,1),enable_events=True,key='double_x'),sg.Text('y'),sg.In(size=(5,1),enable_events=True,key='double_y'),sg.Radio(text='double',group_id='check',enable_events=False,key='double_check')]
self.deal=[sg.Text('x'),sg.In(size=(5,1),enable_events=True,key='deal_x'),sg.Text('y'),sg.In(size=(5,1),enable_events=True,key='deal_y'),sg.Radio(text='deal',group_id='check',enable_events=False,key='deal_check')]
self.extra1=[sg.Text('x'),sg.In(size=(5,1),enable_events=True,key='extra1_x'),sg.Text('y'),sg.In(size=(5,1),enable_events=True,key='extra1_y'),sg.Radio(text='extra1',group_id='check',enable_events=False,key='extra1_check')]
self.extra2=[sg.Text('x'),sg.In(size=(5,1),enable_events=True,key='extra2_x'),sg.Text('y'),sg.In(size=(5,1),enable_events=True,key='extra2_y'),sg.Radio(text='extra2',group_id='check',enable_events=False,key='extra2_check')]
self.start_end_XY_Radio=[sg.Radio(text='grab area',group_id='grab_check',enable_events=False,key='grab_check')]
self.start_end_Y=[sg.Text('start x'),sg.In(size=(5,1),enable_events=True,key='mouse_x_start'),sg.Text('end x'),sg.In(size=(5,1),enable_events=True,key='mouse_x_end')]
self.start_end_X=[sg.Text('start y'),sg.In(size=(5,1),enable_events=True,key='mouse_y_start'),sg.Text('end y'),sg.In(size=(5,1),enable_events=True,key='mouse_y_end')]
self.separator=[sg.HorizontalSeparator()]
self.image_window = [sg.Image(filename="data\MatejkoKonst3Maj1791.png",background_color='white',enable_events=True,size=(300,300),key='_image_')]
self.column_1=[
self.start_end_XY_Radio,
self.start_end_Y,
self.start_end_X,
self.separator,
[sg.Text('in text'), sg.In(size=(25, 1),enable_events=False,key='in')],
[sg.Button('grab',key='_grab_'),sg.Button('calc',key='_calc_'),sg.Button('remove',key='_remove_') ],
[sg.Listbox(values=self.list,enable_events=True,size=(30,10), key='_list_',auto_size_text=True)]
]
# self.column_3 = [
# ,
# [sg.Text('out text '), sg.In(size=(25, 1),enable_events=True,key='out')],
# [sg.Text('extra '), sg.In(size=(25, 1),enable_events=True,key='extra')],
# ]
self.column_2 = [
self.image_window
]
self.layout = [
[sg.Column(self.column_1),sg.Column(self.column_2,key='column_2')]
]
self.window = sg.Window('BlackJack',self.layout,resizable=True)
self.keyboard_listener=KeyboardListener(on_press=self.on_press, on_release=self.on_release)
self.mouse_listener = MouseListener(on_move=self.on_move, on_click=self.on_click,onscroll=self.on_scroll)
self.mouse = Controller()
self.mouse_button = Button
self.keyboard = Controller()
#self.keyboard_key=Key
def click_hit(self):
print((self.window['hit_x'].get()))
print((self.window['hit_y'].get()))
print(int(self.window['hit_x'].get()))
print(int(self.window['hit_y'].get()))
self.mouse.position=(int(self.window['hit_x'].get()),int(self.window['hit_y'].get()))
self.mouse.press(self.mouse_button.left)
self.mouse.release(self.mouse_button.left)
self.mouse.position=(int(self.window['hit_x'].get()),int(self.window['hit_y'].get()))
print(self.mouse.position)
def click_stand(self):
print((self.window['stand_x'].get()))
print((self.window['stand_y'].get()))
print(int(self.window['stand_x'].get()))
print(int(self.window['stand_y'].get()))
self.mouse.position=(int(self.window['stand_x'].get()),int(self.window['stand_y'].get()))
self.mouse.press(self.mouse_button.left)
self.mouse.release(self.mouse_button.left)
self.mouse.position=(int(self.window['stand_x'].get()),int(self.window['stand_y'].get()))
print(self.mouse.position)
def click_split(self):
print((self.window['split_x'].get()))
print((self.window['split_y'].get()))
print(int(self.window['split_x'].get()))
print(int(self.window['split_y'].get()))
self.mouse.position=(int(self.window['split_x'].get()),int(self.window['split_y'].get()))
self.mouse.press(self.mouse_button.left)
self.mouse.release(self.mouse_button.left)
self.mouse.position=(int(self.window['split_x'].get()),int(self.window['split_y'].get()))
print(self.mouse.position)
def click_double(self):
print((self.window['double_x'].get()))
print((self.window['double_y'].get()))
print(int(self.window['double_x'].get()))
print(int(self.window['double_y'].get()))
self.mouse.position=(int(self.window['double_x'].get()),int(self.window['double_y'].get()))
self.mouse.press(self.mouse_button.left)
self.mouse.release(self.mouse_button.left)
self.mouse.position=(int(self.window['double_x'].get()),int(self.window['double_y'].get()))
print(self.mouse.position)
def click_deal(self):
print((self.window['deal_x'].get()))
print((self.window['deal_y'].get()))
print(int(self.window['deal_x'].get()))
print(int(self.window['deal_y'].get()))
self.mouse.position=(int(self.window['deal_x'].get()),int(self.window['deal_y'].get()))
self.mouse.press(self.mouse_button.left)
self.mouse.release(self.mouse_button.left)
self.mouse.position=(int(self.window['deal_x'].get()),int(self.window['deal_y'].get()))
print(self.mouse.position)
def click_extra1(self):
print((self.window['extra1_x'].get()))
print((self.window['extra1_y'].get()))
print(int(self.window['extra1_x'].get()))
print(int(self.window['extra1_y'].get()))
self.mouse.position=(int(self.window['extra1_x'].get()),int(self.window['extra1_y'].get()))
self.mouse.press(self.mouse_button.left)
self.mouse.release(self.mouse_button.left)
self.mouse.position=(int(self.window['extra1_x'].get()),int(self.window['extra1_y'].get()))
print(self.mouse.position)
def click_extra2(self):
print((self.window['extra2_x'].get()))
print((self.window['extra2_y'].get()))
print(int(self.window['extra2_x'].get()))
print(int(self.window['extra2_y'].get()))
self.mouse.position=(int(self.window['extra2_x'].get()),int(self.window['extra2_y'].get()))
self.mouse.press(self.mouse_button.left)
self.mouse.release(self.mouse_button.left)
self.mouse.position=(int(self.window['extra2_x'].get()),int(self.window['extra2_y'].get()))
print(self.mouse.position)
    def run(self):
        # Thread entry point: hand control to the blocking GUI event loop.
        self.start_gui()
def start_gui(self):
#start mouse and keyboard listeners
self.keyboard_listener.start()
self.mouse_listener.start()
#######this blocks dont use########
#self.keyboard_listener.join()
#self.mouseListener.join()
###################################
in_text=""
out_text=""
extra=""
while True:
event, values = self.window.read()
if event=='in':
print()
if event=='out':
out_text=values['out']
if event=='extra':
extra=values['extra']
if event == "_grab_" and self.grab_area_isSet()==True:
img=ImageGrab.grab(bbox=(int(self.window['mouse_x_start'].get()),int(self.window['mouse_y_start'].get()),int(self.window['mouse_x_end'].get()),int(self.window['mouse_y_end'].get())),include_layered_windows=True,all_screens=True)
#img.show()
img.save(self.img_path)
self.window['_image_'].update(filename= self.img_path)
#print("grab")
img_dealer=img=ImageGrab.grab(bbox=(int(self.window['mouse_x_start'].get()),int(self.window['mouse_y_start'].get()),int(self.window['mouse_x_end'].get()),int(self.window['mouse_y_start'].get())+(int(self.window['mouse_y_end'].get())-int(self.window['mouse_y_start'].get()))/2),include_layered_windows=True,all_screens=True)
img_dealer.save(self.img_dealer_path)
img_player=img=ImageGrab.grab(bbox=(int(self.window['mouse_x_start'].get()),int(self.window['mouse_y_start'].get())+(int(self.window['mouse_y_end'].get())-int(self.window['mouse_y_start'].get()))/2,int(self.window['mouse_x_end'].get()),int(self.window['mouse_y_end'].get())),include_layered_windows=True,all_screens=True)
img_player.save(self.img_player_path)
#img_dealer.show()
#img_player.show()
if event=='_calc_':
#send to model
img=Image.open( self.img_path).convert("RGB")
img_dealer=Image.open( self.img_dealer_path).convert("RGB")
img_player=Image.open(self.img_player_path).convert("RGB")
#img=(read_image('data\grab1.png')).float()
a = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225] )])
img=a(img)
img_dealer=Image.open(self.img_dealer_path).convert("RGB")
img_dealer=a(img_dealer)
img_player=Image.open(self.img_player_path).convert("RGB")
img_player=a(img_player)
#predictions = self.model([img])
predictions_dealer = self.model([img_dealer])
predictions_player = self.model([img_player])
#pred_boxes=predictions[0]['boxes'].detach().numpy()
#pred_labels=predictions[0]['labels'].detach().numpy()
#pred_scores=predictions[0]['scores'].detach().numpy()
half_image=(int(self.window['mouse_y_end'].get())-int(self.window['mouse_y_start'].get()))/2
#print('half image:',half_image)
dealer_pred_boxes=predictions_dealer[0]['boxes'].detach().numpy()
dealer_pred_labels=predictions_dealer[0]['labels'].detach().numpy()
dealer_pred_scores=list(predictions_dealer[0]['scores'].detach().numpy())
player_pred_boxes=predictions_player[0]['boxes'].detach().numpy()
player_pred_labels=predictions_player[0]['labels'].detach().numpy()
player_pred_scores=predictions_player[0]['scores'].detach().numpy()
out_img=Image.open( self.img_path)
draw=ImageDraw.Draw(out_img)
#for i in range(0,len(pred_scores)):
# if pred_boxes[i][1]< half_image and pred_boxes[i][3]<half_image:
# #dealers
# dealer_pred_boxes.append(pred_boxes[i])
# draw.rectangle([(pred_boxes[i][0],pred_boxes[i][1]),(pred_boxes[i][2],pred_boxes[i][3])],outline='red',width=2)
# dealer_pred_labels.append(pred_labels[i])
# dealer_pred_scores.append(pred_scores[i])
# elif pred_boxes[i][1]> half_image and pred_boxes[i][3]>half_image:
# #players
# player_pred_boxes.append(pred_boxes[i])
# draw.rectangle([(pred_boxes[i][0],pred_boxes[i][1]),(pred_boxes[i][2],pred_boxes[i][3])],outline='black',width=2)
# player_pred_labels.append(pred_labels[i])
# player_pred_scores.append(pred_scores[i])
for i in range(0,len(dealer_pred_scores)):
draw.rectangle([(dealer_pred_boxes[i][0],dealer_pred_boxes[i][1]),(dealer_pred_boxes[i][2],dealer_pred_boxes[i][3])],outline='red',width=1)
for i in range(0,len(player_pred_scores)):
draw.rectangle([(player_pred_boxes[i][0],half_image+player_pred_boxes[i][1]),(player_pred_boxes[i][2],half_image+player_pred_boxes[i][3])],outline='black',width=1)
player_pred_labels=list(set(player_pred_labels))
print(player_pred_labels)
out_img.save( self.img_path)
self.window['_image_'].update(filename= self.img_path)
print('dealer:')
print(dealer_pred_boxes)
print(dealer_pred_labels)
print(dealer_pred_scores)
print('player:')
print(player_pred_boxes)
print(player_pred_labels)
print(player_pred_scores)
self.list=[]
self.window['_list_'].update(self.list)
dealer_sum=(dealer_pred_labels[dealer_pred_scores.index(max(dealer_pred_scores))] if len(dealer_pred_scores)>0 else -1) if (dealer_pred_labels[dealer_pred_scores.index(max(dealer_pred_scores))] if len(dealer_pred_scores)>0 else -1) <10 else 10
n=int(self.window['in'].get() if self.window['in'].get() !='' else 2 )
print('nbr of cards:',n)
player_sum=0
player_cards=[]
for i,j in zip(range(0,n), range(0,len(player_pred_labels))):
player_sum=player_sum+(player_pred_labels[i] if player_pred_labels[i] <10 else 10)
player_cards.append(player_pred_labels[i])
print(player_sum)
self.list.append(('dealer sum:', dealer_sum ))
self.list.append(('dealer card:', (dealer_pred_labels[dealer_pred_scores.index(max(dealer_pred_scores))] if len(dealer_pred_scores)>0 else -1)))
self.window['_list_'].update(self.list)
self.list.append(('player sum:',player_sum))
self.list.append(('player cards:',player_cards))
self.window['_list_'].update(self.list)
if player_sum <21 and dealer_sum !=-1 and player_sum:
#send to recommender system
recommendation = recommender.recommender(a=0,p=player_sum,d=dealer_sum)
#make a decision
if recommendation.thorpe()==0:
self.list.append(('recommendation: STAND'))
self.window['_list_'].update(self.list)
#self.click_stand()
elif recommendation.thorpe()==1:
self.list.append(('recommendation: HIT'))
self.window['_list_'].update(self.list)
#self.click_hit()
elif recommendation.thorpe()==2:
self.list.append(('recommendation: DOUBLE'))
self.window['_list_'].update(self.list)
#self.click_double()
else:
self.list.append(('!!! BUST !!!!'))
self.window['_list_'].update(self.list)
if event=='_remove_':
if len(values['_list_'])>0:
self.list.remove(in_text+';'+out_text+';'+extra)
self.window['_list_'].update(self.list)
if event=='_list_':
if len(values['_list_'])>1:
list_item_split=values['_list_'][0].strip().split(';')
self.window['in'].update(list_item_split[0])
in_text=values['in']
self.window['out'].update(list_item_split[1])
out_text=values['out']
self.window['extra'].update(list_item_split[2])
extra=values['extra']
if event == sg.WIN_CLOSED:
break
#mouse and keyboard events
def on_press(self,key):
print(key)
def on_release(self,key):
print(key)
def on_move(self,x,y):
x
def on_click(self,x,y,button,pressed):
#self.list.append(button)
#self.list.append(pressed)
#self.window['_list_'].update(self.list)
if len(self.list) >100000: self.list=[]
#on press and release get pointer position
if pressed:
if self.window['grab_check'].get()==True:
self.window['mouse_x_start'].update(x)
self.window['mouse_y_start'].update(y)
else:
if self.window['grab_check'].get()==True:
self.window['mouse_x_end'].update(x)
self.window['mouse_y_end'].update(y)
self.window['grab_check'].update(value=False)
def on_scroll(self,x,y,dx,dy):
print("scroll")
def grab_area_isSet(self):
ret=False
if int(self.window['mouse_x_start'].get()) < int(self.window['mouse_x_end'].get()) and int(self.window['mouse_y_start'].get()) < int(self.window['mouse_y_end'].get()):
ret=True
else:
msg="Grab area must be set n 1. Check the area set button \n 2. Click and drag mouse pointer to mark the area \n 3. Do so from upper left to lower right "
self.list.append(msg)
self.window['_list_'].update(self.list)
print(msg)
ret=False
return ret
##### testing gui
#gui=GUI()
#gui.start()
| [
"PySimpleGUI.Button",
"PIL.ImageDraw.Draw",
"PySimpleGUI.HorizontalSeparator",
"PySimpleGUI.Image",
"pynput.mouse.Controller",
"threading.Thread.__init__",
"PySimpleGUI.In",
"torchvision.transforms.ToTensor",
"PySimpleGUI.Listbox",
"PySimpleGUI.Column",
"PySimpleGUI.Text",
"torchvision.transfo... | [((827, 858), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (852, 858), False, 'import threading\n'), ((4520, 4571), 'PySimpleGUI.Window', 'sg.Window', (['"""BlackJack"""', 'self.layout'], {'resizable': '(True)'}), "('BlackJack', self.layout, resizable=True)\n", (4529, 4571), True, 'import PySimpleGUI as sg\n'), ((4601, 4669), 'pynput.keyboard.Listener', 'KeyboardListener', ([], {'on_press': 'self.on_press', 'on_release': 'self.on_release'}), '(on_press=self.on_press, on_release=self.on_release)\n', (4617, 4669), True, 'from pynput.keyboard import Listener as KeyboardListener\n'), ((4700, 4789), 'pynput.mouse.Listener', 'MouseListener', ([], {'on_move': 'self.on_move', 'on_click': 'self.on_click', 'onscroll': 'self.on_scroll'}), '(on_move=self.on_move, on_click=self.on_click, onscroll=self.\n on_scroll)\n', (4713, 4789), True, 'from pynput.mouse import Listener as MouseListener\n'), ((4805, 4817), 'pynput.mouse.Controller', 'Controller', ([], {}), '()\n', (4815, 4817), False, 'from pynput.mouse import Button, Controller\n'), ((4877, 4889), 'pynput.mouse.Controller', 'Controller', ([], {}), '()\n', (4887, 4889), False, 'from pynput.mouse import Button, Controller\n'), ((1156, 1168), 'PySimpleGUI.Text', 'sg.Text', (['"""x"""'], {}), "('x')\n", (1163, 1168), True, 'import PySimpleGUI as sg\n'), ((1169, 1220), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""hit_x"""'}), "(size=(5, 1), enable_events=True, key='hit_x')\n", (1174, 1220), True, 'import PySimpleGUI as sg\n'), ((1218, 1230), 'PySimpleGUI.Text', 'sg.Text', (['"""y"""'], {}), "('y')\n", (1225, 1230), True, 'import PySimpleGUI as sg\n'), ((1231, 1282), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""hit_y"""'}), "(size=(5, 1), enable_events=True, key='hit_y')\n", (1236, 1282), True, 'import PySimpleGUI as sg\n'), ((1280, 1356), 'PySimpleGUI.Radio', 'sg.Radio', ([], 
{'text': '"""hit"""', 'group_id': '"""check"""', 'enable_events': '(False)', 'key': '"""hit_check"""'}), "(text='hit', group_id='check', enable_events=False, key='hit_check')\n", (1288, 1356), True, 'import PySimpleGUI as sg\n'), ((1375, 1387), 'PySimpleGUI.Text', 'sg.Text', (['"""x"""'], {}), "('x')\n", (1382, 1387), True, 'import PySimpleGUI as sg\n'), ((1388, 1441), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""stand_x"""'}), "(size=(5, 1), enable_events=True, key='stand_x')\n", (1393, 1441), True, 'import PySimpleGUI as sg\n'), ((1439, 1451), 'PySimpleGUI.Text', 'sg.Text', (['"""y"""'], {}), "('y')\n", (1446, 1451), True, 'import PySimpleGUI as sg\n'), ((1452, 1505), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""stand_y"""'}), "(size=(5, 1), enable_events=True, key='stand_y')\n", (1457, 1505), True, 'import PySimpleGUI as sg\n'), ((1503, 1588), 'PySimpleGUI.Radio', 'sg.Radio', ([], {'text': '"""stand"""', 'group_id': '"""check"""', 'enable_events': '(False)', 'key': '"""stand_check"""'}), "(text='stand', group_id='check', enable_events=False, key='stand_check'\n )\n", (1511, 1588), True, 'import PySimpleGUI as sg\n'), ((1602, 1614), 'PySimpleGUI.Text', 'sg.Text', (['"""x"""'], {}), "('x')\n", (1609, 1614), True, 'import PySimpleGUI as sg\n'), ((1615, 1668), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""split_x"""'}), "(size=(5, 1), enable_events=True, key='split_x')\n", (1620, 1668), True, 'import PySimpleGUI as sg\n'), ((1666, 1678), 'PySimpleGUI.Text', 'sg.Text', (['"""y"""'], {}), "('y')\n", (1673, 1678), True, 'import PySimpleGUI as sg\n'), ((1679, 1732), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""split_y"""'}), "(size=(5, 1), enable_events=True, key='split_y')\n", (1684, 1732), True, 'import PySimpleGUI as sg\n'), ((1730, 1815), 'PySimpleGUI.Radio', 'sg.Radio', ([], {'text': 
'"""split"""', 'group_id': '"""check"""', 'enable_events': '(False)', 'key': '"""split_check"""'}), "(text='split', group_id='check', enable_events=False, key='split_check'\n )\n", (1738, 1815), True, 'import PySimpleGUI as sg\n'), ((1830, 1842), 'PySimpleGUI.Text', 'sg.Text', (['"""x"""'], {}), "('x')\n", (1837, 1842), True, 'import PySimpleGUI as sg\n'), ((1843, 1897), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""double_x"""'}), "(size=(5, 1), enable_events=True, key='double_x')\n", (1848, 1897), True, 'import PySimpleGUI as sg\n'), ((1895, 1907), 'PySimpleGUI.Text', 'sg.Text', (['"""y"""'], {}), "('y')\n", (1902, 1907), True, 'import PySimpleGUI as sg\n'), ((1908, 1962), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""double_y"""'}), "(size=(5, 1), enable_events=True, key='double_y')\n", (1913, 1962), True, 'import PySimpleGUI as sg\n'), ((1960, 2047), 'PySimpleGUI.Radio', 'sg.Radio', ([], {'text': '"""double"""', 'group_id': '"""check"""', 'enable_events': '(False)', 'key': '"""double_check"""'}), "(text='double', group_id='check', enable_events=False, key=\n 'double_check')\n", (1968, 2047), True, 'import PySimpleGUI as sg\n'), ((2060, 2072), 'PySimpleGUI.Text', 'sg.Text', (['"""x"""'], {}), "('x')\n", (2067, 2072), True, 'import PySimpleGUI as sg\n'), ((2073, 2125), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""deal_x"""'}), "(size=(5, 1), enable_events=True, key='deal_x')\n", (2078, 2125), True, 'import PySimpleGUI as sg\n'), ((2123, 2135), 'PySimpleGUI.Text', 'sg.Text', (['"""y"""'], {}), "('y')\n", (2130, 2135), True, 'import PySimpleGUI as sg\n'), ((2136, 2188), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""deal_y"""'}), "(size=(5, 1), enable_events=True, key='deal_y')\n", (2141, 2188), True, 'import PySimpleGUI as sg\n'), ((2186, 2264), 'PySimpleGUI.Radio', 'sg.Radio', ([], {'text': 
'"""deal"""', 'group_id': '"""check"""', 'enable_events': '(False)', 'key': '"""deal_check"""'}), "(text='deal', group_id='check', enable_events=False, key='deal_check')\n", (2194, 2264), True, 'import PySimpleGUI as sg\n'), ((2284, 2296), 'PySimpleGUI.Text', 'sg.Text', (['"""x"""'], {}), "('x')\n", (2291, 2296), True, 'import PySimpleGUI as sg\n'), ((2297, 2351), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""extra1_x"""'}), "(size=(5, 1), enable_events=True, key='extra1_x')\n", (2302, 2351), True, 'import PySimpleGUI as sg\n'), ((2349, 2361), 'PySimpleGUI.Text', 'sg.Text', (['"""y"""'], {}), "('y')\n", (2356, 2361), True, 'import PySimpleGUI as sg\n'), ((2362, 2416), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""extra1_y"""'}), "(size=(5, 1), enable_events=True, key='extra1_y')\n", (2367, 2416), True, 'import PySimpleGUI as sg\n'), ((2414, 2501), 'PySimpleGUI.Radio', 'sg.Radio', ([], {'text': '"""extra1"""', 'group_id': '"""check"""', 'enable_events': '(False)', 'key': '"""extra1_check"""'}), "(text='extra1', group_id='check', enable_events=False, key=\n 'extra1_check')\n", (2422, 2501), True, 'import PySimpleGUI as sg\n'), ((2516, 2528), 'PySimpleGUI.Text', 'sg.Text', (['"""x"""'], {}), "('x')\n", (2523, 2528), True, 'import PySimpleGUI as sg\n'), ((2529, 2583), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""extra2_x"""'}), "(size=(5, 1), enable_events=True, key='extra2_x')\n", (2534, 2583), True, 'import PySimpleGUI as sg\n'), ((2581, 2593), 'PySimpleGUI.Text', 'sg.Text', (['"""y"""'], {}), "('y')\n", (2588, 2593), True, 'import PySimpleGUI as sg\n'), ((2594, 2648), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""extra2_y"""'}), "(size=(5, 1), enable_events=True, key='extra2_y')\n", (2599, 2648), True, 'import PySimpleGUI as sg\n'), ((2646, 2733), 'PySimpleGUI.Radio', 'sg.Radio', ([], {'text': 
'"""extra2"""', 'group_id': '"""check"""', 'enable_events': '(False)', 'key': '"""extra2_check"""'}), "(text='extra2', group_id='check', enable_events=False, key=\n 'extra2_check')\n", (2654, 2733), True, 'import PySimpleGUI as sg\n'), ((2760, 2853), 'PySimpleGUI.Radio', 'sg.Radio', ([], {'text': '"""grab area"""', 'group_id': '"""grab_check"""', 'enable_events': '(False)', 'key': '"""grab_check"""'}), "(text='grab area', group_id='grab_check', enable_events=False, key=\n 'grab_check')\n", (2768, 2853), True, 'import PySimpleGUI as sg\n'), ((2873, 2891), 'PySimpleGUI.Text', 'sg.Text', (['"""start x"""'], {}), "('start x')\n", (2880, 2891), True, 'import PySimpleGUI as sg\n'), ((2892, 2951), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""mouse_x_start"""'}), "(size=(5, 1), enable_events=True, key='mouse_x_start')\n", (2897, 2951), True, 'import PySimpleGUI as sg\n'), ((2949, 2965), 'PySimpleGUI.Text', 'sg.Text', (['"""end x"""'], {}), "('end x')\n", (2956, 2965), True, 'import PySimpleGUI as sg\n'), ((2966, 3023), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""mouse_x_end"""'}), "(size=(5, 1), enable_events=True, key='mouse_x_end')\n", (2971, 3023), True, 'import PySimpleGUI as sg\n'), ((3048, 3066), 'PySimpleGUI.Text', 'sg.Text', (['"""start y"""'], {}), "('start y')\n", (3055, 3066), True, 'import PySimpleGUI as sg\n'), ((3067, 3126), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""mouse_y_start"""'}), "(size=(5, 1), enable_events=True, key='mouse_y_start')\n", (3072, 3126), True, 'import PySimpleGUI as sg\n'), ((3124, 3140), 'PySimpleGUI.Text', 'sg.Text', (['"""end y"""'], {}), "('end y')\n", (3131, 3140), True, 'import PySimpleGUI as sg\n'), ((3141, 3198), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(5, 1)', 'enable_events': '(True)', 'key': '"""mouse_y_end"""'}), "(size=(5, 1), enable_events=True, key='mouse_y_end')\n", (3146, 3198), True, 
'import PySimpleGUI as sg\n'), ((3221, 3245), 'PySimpleGUI.HorizontalSeparator', 'sg.HorizontalSeparator', ([], {}), '()\n', (3243, 3245), True, 'import PySimpleGUI as sg\n'), ((3276, 3410), 'PySimpleGUI.Image', 'sg.Image', ([], {'filename': '"""data\\\\MatejkoKonst3Maj1791.png"""', 'background_color': '"""white"""', 'enable_events': '(True)', 'size': '(300, 300)', 'key': '"""_image_"""'}), "(filename='data\\\\MatejkoKonst3Maj1791.png', background_color=\n 'white', enable_events=True, size=(300, 300), key='_image_')\n", (3284, 3410), True, 'import PySimpleGUI as sg\n'), ((927, 946), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (939, 946), False, 'import torch\n'), ((3624, 3642), 'PySimpleGUI.Text', 'sg.Text', (['"""in text"""'], {}), "('in text')\n", (3631, 3642), True, 'import PySimpleGUI as sg\n'), ((3644, 3694), 'PySimpleGUI.In', 'sg.In', ([], {'size': '(25, 1)', 'enable_events': '(False)', 'key': '"""in"""'}), "(size=(25, 1), enable_events=False, key='in')\n", (3649, 3694), True, 'import PySimpleGUI as sg\n'), ((3720, 3751), 'PySimpleGUI.Button', 'sg.Button', (['"""grab"""'], {'key': '"""_grab_"""'}), "('grab', key='_grab_')\n", (3729, 3751), True, 'import PySimpleGUI as sg\n'), ((3751, 3782), 'PySimpleGUI.Button', 'sg.Button', (['"""calc"""'], {'key': '"""_calc_"""'}), "('calc', key='_calc_')\n", (3760, 3782), True, 'import PySimpleGUI as sg\n'), ((3782, 3817), 'PySimpleGUI.Button', 'sg.Button', (['"""remove"""'], {'key': '"""_remove_"""'}), "('remove', key='_remove_')\n", (3791, 3817), True, 'import PySimpleGUI as sg\n'), ((3845, 3948), 'PySimpleGUI.Listbox', 'sg.Listbox', ([], {'values': 'self.list', 'enable_events': '(True)', 'size': '(30, 10)', 'key': '"""_list_"""', 'auto_size_text': '(True)'}), "(values=self.list, enable_events=True, size=(30, 10), key=\n '_list_', auto_size_text=True)\n", (3855, 3948), True, 'import PySimpleGUI as sg\n'), ((4397, 4421), 'PySimpleGUI.Column', 'sg.Column', (['self.column_1'], {}), '(self.column_1)\n', 
(4406, 4421), True, 'import PySimpleGUI as sg\n'), ((4422, 4462), 'PySimpleGUI.Column', 'sg.Column', (['self.column_2'], {'key': '"""column_2"""'}), "(self.column_2, key='column_2')\n", (4431, 4462), True, 'import PySimpleGUI as sg\n'), ((12657, 12682), 'PIL.Image.open', 'Image.open', (['self.img_path'], {}), '(self.img_path)\n', (12667, 12682), False, 'from PIL import ImageGrab, Image, ImageDraw\n'), ((12705, 12728), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['out_img'], {}), '(out_img)\n', (12719, 12728), False, 'from PIL import ImageGrab, Image, ImageDraw\n'), ((16220, 16276), 'recommender.recommender', 'recommender.recommender', ([], {'a': '(0)', 'p': 'player_sum', 'd': 'dealer_sum'}), '(a=0, p=player_sum, d=dealer_sum)\n', (16243, 16276), False, 'import recommender\n'), ((10888, 10913), 'PIL.Image.open', 'Image.open', (['self.img_path'], {}), '(self.img_path)\n', (10898, 10913), False, 'from PIL import ImageGrab, Image, ImageDraw\n'), ((10957, 10989), 'PIL.Image.open', 'Image.open', (['self.img_dealer_path'], {}), '(self.img_dealer_path)\n', (10967, 10989), False, 'from PIL import ImageGrab, Image, ImageDraw\n'), ((11033, 11065), 'PIL.Image.open', 'Image.open', (['self.img_player_path'], {}), '(self.img_player_path)\n', (11043, 11065), False, 'from PIL import ImageGrab, Image, ImageDraw\n'), ((11199, 11220), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11218, 11220), False, 'from torchvision import datasets, models, transforms\n'), ((11221, 11296), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (11241, 11296), False, 'from torchvision import datasets, models, transforms\n'), ((11354, 11386), 'PIL.Image.open', 'Image.open', (['self.img_dealer_path'], {}), '(self.img_dealer_path)\n', (11364, 11386), False, 'from PIL import ImageGrab, Image, ImageDraw\n'), ((11470, 11502), 
'PIL.Image.open', 'Image.open', (['self.img_player_path'], {}), '(self.img_player_path)\n', (11480, 11502), False, 'from PIL import ImageGrab, Image, ImageDraw\n')] |
#!/usr/bin/env python3
import os
import shlex
import subprocess
class Program:
    """A runnable program definition: its requirements, the location of
    its source, and the commands needed to set it up and start it."""

    def __init__(self, name, requires, location, setup, start):
        self.name = name
        self.requires = requires        # requirement tags, e.g. "python3"
        self.location = location        # dict describing where the code lives
        self.setup_commands = setup     # commands executed by setup()
        self.start = start              # command executed by run()
        self._fetched = False           # set to True by fetch()
        self._directory = None          # working directory resolved by fetch()

    def _run_command(self, command):
        """Run *command* (a string or an argv list).

        Raises:
            subprocess.CalledProcessError: if the command exits non-zero.
        """
        if isinstance(command, str):
            command = shlex.split(command)
        return subprocess.run(command, check=True)

    def install_requirements(self):
        """Install every supported requirement via apt-get.

        Raises:
            ValueError: for a requirement tag that is not recognised.
        """
        for item in self.requires:
            if item == "python3":
                self._run_command("sudo apt-get install -y -q "
                                  "python3 python3-pip")
            elif item == "python2":
                self._run_command("sudo apt-get install -y -q "
                                  "python2")
            else:
                raise ValueError("Unsupported requirement %r" % item)

    def fetch(self):
        """Resolve the program's working directory from its location.

        Raises:
            ValueError: for an unsupported location type.
        """
        if self.location["type"] == "local":
            self._directory = self.location["directory"]
        else:
            raise ValueError("Unsupported location %r" % self.location)
        self._fetched = True

    def setup(self):
        """Run all configured setup commands in order."""
        for command in self.setup_commands:
            self._run_command(command)

    def run(self):
        """Change into the fetched directory and start the program.

        Raises:
            RuntimeError: if fetch() was not called first.  (Previously
                this only printed a warning and then crashed anyway with a
                TypeError from ``os.chdir(None)``.)
        """
        if not self._fetched:
            raise RuntimeError(
                "%s was run without being fetched" % self.name)
        print("Starting", self.name)
        os.chdir(self._directory)
        self._run_command(self.start)
| [
"os.chdir",
"subprocess.run",
"shlex.split"
] | [((495, 530), 'subprocess.run', 'subprocess.run', (['command'], {'check': '(True)'}), '(command, check=True)\n', (509, 530), False, 'import subprocess\n'), ((1494, 1519), 'os.chdir', 'os.chdir', (['self._directory'], {}), '(self._directory)\n', (1502, 1519), False, 'import os\n'), ((458, 478), 'shlex.split', 'shlex.split', (['command'], {}), '(command)\n', (469, 478), False, 'import shlex\n')] |
"""Uses a specified model to predicit missing characters from a file containing texts. """
from greek_char_bert.predict import (
MLMPredicter,
replace_square_brackets,
sentences_to_dicts,
)
from greek_char_bert.run_eval import convert_masking
from greek_data_prep.clean_data import clean_texts, CHARS_TO_REMOVE, CHARS_TO_REPLACE
from cltk.corpus.utils.formatter import cltk_normalize
import re
import argparse
def predict_from_file(path, model, use_sequential_decoding, align, step_len):
    """Runs prediction using the model on the texts located in the file given in path.

    Args:
        path: text file with one input text per line; gaps are marked with
            square brackets and full stops, e.g. ``[...]``.
        model: MLMPredicter used to fill in the masked characters.
        use_sequential_decoding: if True, decode one mask at a time
            (slower) instead of predicting all masks at once.
        align: if True, indent printed predictions so the overlapping
            windows of a long text line up approximately.
        step_len: window step for texts longer than the model input; when
            falsy or >= the model's limit, half the maximum length is used.
    """
    # NOTE(review): presumably the -2 leaves room for two special tokens
    # added by the processor — confirm against MLMPredicter.
    max_seq_len = model.processor.max_seq_len - 2
    with open(path, "r") as fp:
        texts = fp.read().splitlines()
    # prepare texts
    texts = clean_texts(texts, CHARS_TO_REMOVE, CHARS_TO_REPLACE)
    texts = [cltk_normalize(replace_square_brackets(t)) for t in texts]
    # spaces are encoded as underscores before prediction
    texts = [t.replace(" ", "_") for t in texts]
    results = []
    # break up long texts
    for t in texts:
        sequences = []
        if len(t) >= max_seq_len:
            # fall back to a half-window step when step_len is unset/too big
            if not (step_len and step_len < max_seq_len):
                step_len = round(max_seq_len / 2)
            # for i in range(0, len(t) - step_len, step_len):
            for i in range(0, len(t), step_len):
                seq = t[i : i + max_seq_len]
                sequences.append(seq)
        else:
            sequences.append(t)
        sequences = convert_masking(sequences)
        dicts = sentences_to_dicts(sequences)
        if use_sequential_decoding:
            result = model.predict_sequentially(dicts=dicts)
        else:
            result = model.predict(dicts=dicts)
        results.append(result)
    # output results
    for result in results:
        nb_of_masks = 0  # needed for proper alignment
        for i, res in enumerate(result):
            prediced_text = res["predictions"]["text_with_preds"].replace("_", " ")
            masked_text = res["predictions"]["masked_text"].replace("_", " ")
            if align:
                if not step_len:
                    step_len = round(max_seq_len / 2)
                # an approximate alignment is calculated by shifting each line by step_len + 2 * the number of masks in the overlaping portion of the previous prediction (to take into account the square brackets which are added around each prediction)
                print(" " * (step_len * i + (2 * nb_of_masks)) + prediced_text)
                nb_of_masks += len(re.findall(r"#+", masked_text[:step_len]))
            else:
                print(res["predictions"]["text_with_preds"].replace("_", " "))
if __name__ == "__main__":
    # Default paths; both can be overridden via the command-line flags below.
    model_path = "../../models/greek_char_BERT"
    file = "../../data/prediction_test.txt"
    parser = argparse.ArgumentParser(
        description="Run prediction on a file containing one or more texts with missing characters."
    )
    parser.add_argument(
        "-f",
        "--file",
        default=file,
        help="The file with the texts. Missing characters are indicated by enclosing the number of full stops corresponding to the number of missing characters with square brackets, e.g.: μῆνιν ἄ[...]ε θεὰ Πηληϊάδεω Ἀχ[...]ος",
    )
    parser.add_argument(
        "-m",
        "--model_path",
        default=model_path,
        help="The path to the saved model to use for prediction.",
    )
    parser.add_argument(
        "-s",
        "--sequential_decoding",
        default=False,
        action="store_true",
        help="Use sequential decoding (warning: very slow, especially without a GPU).",
    )
    parser.add_argument(
        "-a",
        "--align",
        default=False,
        action="store_true",
        help="Align output from long texts which are broken up into multiple parts.",
    )
    parser.add_argument(
        "--step_len",
        type=int,
        help="The step length to use when handling texts longer than the model's maximum input length.",
    )
    args = parser.parse_args()
    file = args.file
    model_path = args.model_path
    use_sequential_decoding = args.sequential_decoding
    align = args.align
    step_len = args.step_len
    # Load the trained model once, then run prediction over the whole file.
    model = MLMPredicter.load(model_path, batch_size=32)
    predict_from_file(file, model, use_sequential_decoding, align, step_len)
| [
"greek_char_bert.predict.MLMPredicter.load",
"greek_data_prep.clean_data.clean_texts",
"greek_char_bert.run_eval.convert_masking",
"argparse.ArgumentParser",
"greek_char_bert.predict.sentences_to_dicts",
"re.findall",
"greek_char_bert.predict.replace_square_brackets"
] | [((746, 799), 'greek_data_prep.clean_data.clean_texts', 'clean_texts', (['texts', 'CHARS_TO_REMOVE', 'CHARS_TO_REPLACE'], {}), '(texts, CHARS_TO_REMOVE, CHARS_TO_REPLACE)\n', (757, 799), False, 'from greek_data_prep.clean_data import clean_texts, CHARS_TO_REMOVE, CHARS_TO_REPLACE\n'), ((2728, 2855), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run prediction on a file containing one or more texts with missing characters."""'}), "(description=\n 'Run prediction on a file containing one or more texts with missing characters.'\n )\n", (2751, 2855), False, 'import argparse\n'), ((4138, 4182), 'greek_char_bert.predict.MLMPredicter.load', 'MLMPredicter.load', (['model_path'], {'batch_size': '(32)'}), '(model_path, batch_size=32)\n', (4155, 4182), False, 'from greek_char_bert.predict import MLMPredicter, replace_square_brackets, sentences_to_dicts\n'), ((1409, 1435), 'greek_char_bert.run_eval.convert_masking', 'convert_masking', (['sequences'], {}), '(sequences)\n', (1424, 1435), False, 'from greek_char_bert.run_eval import convert_masking\n'), ((1452, 1481), 'greek_char_bert.predict.sentences_to_dicts', 'sentences_to_dicts', (['sequences'], {}), '(sequences)\n', (1470, 1481), False, 'from greek_char_bert.predict import MLMPredicter, replace_square_brackets, sentences_to_dicts\n'), ((828, 854), 'greek_char_bert.predict.replace_square_brackets', 'replace_square_brackets', (['t'], {}), '(t)\n', (851, 854), False, 'from greek_char_bert.predict import MLMPredicter, replace_square_brackets, sentences_to_dicts\n'), ((2453, 2493), 're.findall', 're.findall', (['"""#+"""', 'masked_text[:step_len]'], {}), "('#+', masked_text[:step_len])\n", (2463, 2493), False, 'import re\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
import os, sys
# Add the current working directory to the module search path (at index 1,
# just after the script's own directory) so local modules can be imported.
importPath = os.path.abspath(os.getcwd())
sys.path.insert(1, importPath)
"""
Package system
"""
# Folders
# Files
# from . import disectTelemetryLogV2
# from . import drive_utility
# from . import getTelemetry
# from . import testallsata
# from . import twidlDictGen
| [
"sys.path.insert",
"os.getcwd"
] | [((289, 319), 'sys.path.insert', 'sys.path.insert', (['(1)', 'importPath'], {}), '(1, importPath)\n', (304, 319), False, 'import os, sys\n'), ((275, 286), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (284, 286), False, 'import os, sys\n')] |
# modules for MT940 ABN-AMRO
import re
from mt940e_v2 import Editor
import pdb # noqa F401
# decode MT940 86 strings and deducts payee and memo
# for MT940 ABN-AMRO output
class ParseMT940:
    """Decodes ABN-AMRO MT940 ':86:' description strings into a
    (payee, memo) pair, with helpers for amount/date conversion and
    QIF record output."""

    # payee and memo are truncated to this many characters
    cutoff = 65

    @classmethod
    def code86(cls, string86, bank_account, date, amount):
        ''' determine the code in line 86 on how to handle it. The first
            5 characters are taken as the code
        '''
        _search = re.search('^.{5}', string86)
        code = _search.group()
        # modify string86 so it will handle '/' in text correctly
        string86 = re.sub(r"(/\D{3}/|/\D{4}/)", r"/\1", string86)
        # Bug fix: default both values so a branch whose regex search fails
        # can no longer leave them unbound (UnboundLocalError at the final
        # re.sub calls below).
        payee = ''
        memo = ''
        if code == '':
            assert False, "something not right there should alwats be a code"
        elif code == '/TRTP':
            _search = re.search('//IBAN/(.*?(?=//))', string86)
            if _search:
                payee = _search.group(1) + ' - '
                _search = re.search('//NAME/(.*?(?=//))', string86)
                if _search:
                    payee = payee + _search.group(1)
            string86 = re.sub(r"(/\D{3}/|/\D{4}/)", r"/\1", string86)
            _search = re.search('//REMI/(.*?(?=//))', string86)
            if _search:
                memo = _search.group(1)
            else:
                memo = ''
            if memo.isdigit():
                # a purely numeric REMI is followed by the real description
                _search = re.search('//REMI/.*?(?=/)/(.*?(?=//))', string86)
                if _search:
                    memo = memo + '/' + _search.group(1)
        elif code == 'SEPA ':
            _search = re.search('IBAN:(.*?(?=BIC:))', string86)
            if not _search:
                _search = re.search('INCASSANT:(.*?(?=NAAM:))', string86)
            if _search:
                payee = _search.group(1).strip() + ' - '
                _search = re.search('NAAM:(.*?(?=OMSCHRIJVING:))', string86)
                if _search:
                    payee = payee + _search.group(1).strip()
            _search = re.search('OMSCHRIJVING:(.*$)', string86)
            if _search:
                memo = (_search.group(1)).strip()
        elif code == 'BEA ':
            payee = 'PIN: '
            _search = re.search(r'(\d\d.\d\d\.\d\d/\d\d.\d\d\s.*$)', string86)
            if _search:
                memo = _search.group(1)
                _search = re.search(r'\d\d.\d\d\.\d\d/\d\d.\d\d\s(.*?(?=,))',
                                    memo).group(1)
                if _search:
                    payee = payee + _search
        elif code == 'GEA ':
            payee = 'ATM: '
            _search = re.search(r'(\d\d.\d\d\.\d\d/\d\d.\d\d\s.*$)', string86)
            if _search:
                memo = _search.group(1)
                _search = re.search(r'\d\d.\d\d\.\d\d/\d\d.\d\d\s(.*?(?=,))',
                                    memo).group(1)
                payee = payee + _search
        # where code is not defined call an editor program to
        # manually parse it
        else:
            string86 = re.sub(' +', ' ', string86)
            payee, memo = Editor.edit(string86, bank_account, date, amount)
        # collapse whitespace and truncate (raw strings fix the invalid
        # '\s' escape warning; the pattern itself is unchanged)
        memo = re.sub(r'\s+', ' ', memo.strip())[:cls.cutoff]
        payee = re.sub(r'\s+', ' ', payee.strip())[:cls.cutoff]
        return payee, memo

    @staticmethod
    def conv_amount_str(creditsign, amount_str):
        ''' converts amount and output amount in str value
        '''
        amount = amount_str.replace(',', '.')
        if creditsign == 'D':
            sign = '-'
        else:
            sign = ''
        amount = '{0}{1}'.format(sign, amount)
        if amount.endswith('.'):
            amount = amount + '00'
        return amount

    @staticmethod
    def transaction_date_conversion(v_date, t_date):
        ''' converts the date and checks if v_d and t_d are in the same year
            input: v_date: yymmdd, t_date: mmdd
            output: date ddmmyyyy
        '''
        date = ''
        year = int(v_date[0:2])+2000  # this century only
        # check if valuta date is December and transaction date is January
        # add a year in that case
        if (v_date[2:4] == '12') and (t_date[0:2] == '01'):
            year = year+1
        date = t_date[2:4]+'/'+t_date[0:2]+'/'+str(year)
        return date

    @staticmethod
    def write_qif_record(qf, date, amount, payee, memo):
        ''' output to file with the qif format
            - D<date>
            - T<amount>
            - P<payee>
            - M<memo>
            - ^
        '''
        qf.write('D%s\n' % date)
        qf.write('T%s\n' % amount)
        qf.write('P%s\n' % payee)
        qf.write('M%s\n' % memo)
        qf.write('^\n')
| [
"re.sub",
"mt940e_v2.Editor.edit",
"re.search"
] | [((439, 467), 're.search', 're.search', (['"""^.{5}"""', 'string86'], {}), "('^.{5}', string86)\n", (448, 467), False, 'import re\n'), ((585, 632), 're.sub', 're.sub', (['"""(/\\\\D{3}/|/\\\\D{4}/)"""', '"""/\\\\1"""', 'string86'], {}), "('(/\\\\D{3}/|/\\\\D{4}/)', '/\\\\1', string86)\n", (591, 632), False, 'import re\n'), ((787, 828), 're.search', 're.search', (['"""//IBAN/(.*?(?=//))"""', 'string86'], {}), "('//IBAN/(.*?(?=//))', string86)\n", (796, 828), False, 'import re\n'), ((925, 966), 're.search', 're.search', (['"""//NAME/(.*?(?=//))"""', 'string86'], {}), "('//NAME/(.*?(?=//))', string86)\n", (934, 966), False, 'import re\n'), ((1064, 1111), 're.sub', 're.sub', (['"""(/\\\\D{3}/|/\\\\D{4}/)"""', '"""/\\\\1"""', 'string86'], {}), "('(/\\\\D{3}/|/\\\\D{4}/)', '/\\\\1', string86)\n", (1070, 1111), False, 'import re\n'), ((1133, 1174), 're.search', 're.search', (['"""//REMI/(.*?(?=//))"""', 'string86'], {}), "('//REMI/(.*?(?=//))', string86)\n", (1142, 1174), False, 'import re\n'), ((1342, 1392), 're.search', 're.search', (['"""//REMI/.*?(?=/)/(.*?(?=//))"""', 'string86'], {}), "('//REMI/.*?(?=/)/(.*?(?=//))', string86)\n", (1351, 1392), False, 'import re\n'), ((1532, 1573), 're.search', 're.search', (['"""IBAN:(.*?(?=BIC:))"""', 'string86'], {}), "('IBAN:(.*?(?=BIC:))', string86)\n", (1541, 1573), False, 'import re\n'), ((1949, 1990), 're.search', 're.search', (['"""OMSCHRIJVING:(.*$)"""', 'string86'], {}), "('OMSCHRIJVING:(.*$)', string86)\n", (1958, 1990), False, 'import re\n'), ((1629, 1676), 're.search', 're.search', (['"""INCASSANT:(.*?(?=NAAM:))"""', 'string86'], {}), "('INCASSANT:(.*?(?=NAAM:))', string86)\n", (1638, 1676), False, 'import re\n'), ((1785, 1835), 're.search', 're.search', (['"""NAAM:(.*?(?=OMSCHRIJVING:))"""', 'string86'], {}), "('NAAM:(.*?(?=OMSCHRIJVING:))', string86)\n", (1794, 1835), False, 'import re\n'), ((2147, 2214), 're.search', 're.search', (['"""(\\\\d\\\\d.\\\\d\\\\d\\\\.\\\\d\\\\d/\\\\d\\\\d.\\\\d\\\\d\\\\s.*$)"""', 
'string86'], {}), "('(\\\\d\\\\d.\\\\d\\\\d\\\\.\\\\d\\\\d/\\\\d\\\\d.\\\\d\\\\d\\\\s.*$)', string86)\n", (2156, 2214), False, 'import re\n'), ((2552, 2619), 're.search', 're.search', (['"""(\\\\d\\\\d.\\\\d\\\\d\\\\.\\\\d\\\\d/\\\\d\\\\d.\\\\d\\\\d\\\\s.*$)"""', 'string86'], {}), "('(\\\\d\\\\d.\\\\d\\\\d\\\\.\\\\d\\\\d/\\\\d\\\\d.\\\\d\\\\d\\\\s.*$)', string86)\n", (2561, 2619), False, 'import re\n'), ((2972, 2999), 're.sub', 're.sub', (['""" +"""', '""" """', 'string86'], {}), "(' +', ' ', string86)\n", (2978, 2999), False, 'import re\n'), ((3026, 3075), 'mt940e_v2.Editor.edit', 'Editor.edit', (['string86', 'bank_account', 'date', 'amount'], {}), '(string86, bank_account, date, amount)\n', (3037, 3075), False, 'from mt940e_v2 import Editor\n'), ((2296, 2364), 're.search', 're.search', (['"""\\\\d\\\\d.\\\\d\\\\d\\\\.\\\\d\\\\d/\\\\d\\\\d.\\\\d\\\\d\\\\s(.*?(?=,))"""', 'memo'], {}), "('\\\\d\\\\d.\\\\d\\\\d\\\\.\\\\d\\\\d/\\\\d\\\\d.\\\\d\\\\d\\\\s(.*?(?=,))', memo)\n", (2305, 2364), False, 'import re\n'), ((2701, 2769), 're.search', 're.search', (['"""\\\\d\\\\d.\\\\d\\\\d\\\\.\\\\d\\\\d/\\\\d\\\\d.\\\\d\\\\d\\\\s(.*?(?=,))"""', 'memo'], {}), "('\\\\d\\\\d.\\\\d\\\\d\\\\.\\\\d\\\\d/\\\\d\\\\d.\\\\d\\\\d\\\\s(.*?(?=,))', memo)\n", (2710, 2769), False, 'import re\n')] |
import os
from boggle import ASSET_PATH, DB_PATH
from boggle.board import Board
from boggle.dictionary import Dictionary
from boggle.boggleGame import BoggleGame
from boggle.match import Match
from flask import Flask, jsonify, abort, request, Response, render_template
import redis

app = Flask(__name__)
redisConn = redis.StrictRedis(host='localhost', port=6379, db=0)

# TODO: move these paths/hosts to a config module
dictionaryFP = os.path.join(ASSET_PATH, "dictionary.txt")
dictionary = Dictionary(dictionaryFP)
boardFP = os.path.join(ASSET_PATH, 'TestBoard.txt')
Board.load_from_file(boardFP, redisConn)
boggleGame = BoggleGame(dictionary, redisConn, DB_PATH)


@app.route("/status")
def status():
    """Liveness/health-check endpoint."""
    return "Health Check"


@app.route("/")
@app.route("/boggle/game", methods=['GET'])
def play_single():
    """Serve the single-player game page."""
    return render_template('boggle-game.html')


@app.route("/api/v1.0/boggle/boards", methods=['GET'])
def get_random_board():
    """Return a random board from the board cache."""
    letters = boggleGame.get_random_board_string()
    return jsonify({"board_string": letters})


@app.route("/api/v1.0/boggle/boards/<string:board_string>/word/<string:word>", methods=['GET'])
def find_word(board_string: str, word: str):
    """Look a word up in the given board; score equals word length when found."""
    # hardcoded 4x4 size for now, can add POST method with json data later if needed
    if len(board_string) != 16:
        abort(400)
    found = boggleGame.find_word(board_string, word)
    score = len(word) if found else 0  # might want to move this to database
    return jsonify({"score": score, "word": word})


@app.route("/api/v1.0/boggle/matches/<string:match_id>", methods=['GET'])
def get_match(match_id: str):
    """Report the status of a match; ongoing matches reveal only id and status."""
    match_status = boggleGame.get_match_status(match_id)
    if match_status is None:
        abort(400)
    if match_status["status"] == "ongoing":
        match_status = {
            "match_id": match_status["match_id"],
            "status": "ongoing",
        }
    return jsonify(match_status)


@app.route("/api/v1.0/boggle/matches", methods=['POST'])
def new_match():
    """Create a new match and return its id."""
    return jsonify({"match_id": boggleGame.create_new_match()})


@app.route("/api/v1.0/boggle/matches/<string:match_id>", methods=['PUT'])
def player_finish_match(match_id: str):
    """Record a player's name and final word list for the match."""
    payload = request.get_json()
    match_status = boggleGame.update_match(match_id, payload['name'], payload['words'])
    if match_status is None:
        abort(400)  # match is already completed
    return jsonify(match_status)


@app.route("/api/v1.0/boggle/matches/<string:match_id>/session", methods=['POST'])
def start_match(match_id: str):
    """Start a match session; respond with the board and the match id."""
    letters = boggleGame.start_match(match_id)
    if letters is None:
        abort(400)  # match is already completed
    return jsonify({"match_id": match_id, "board_string": letters})


@app.route("/api/v1.0/boggle/boards", methods=['POST'])
def generate_board():
    """Not implemented yet."""
    pass


if __name__ == '__main__':
    app.run()
| [
"flask.render_template",
"flask.Flask",
"os.path.join",
"boggle.board.Board.load_from_file",
"flask.request.get_json",
"redis.StrictRedis",
"boggle.boggleGame.BoggleGame",
"boggle.dictionary.Dictionary",
"flask.abort",
"flask.jsonify"
] | [((292, 307), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (297, 307), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((320, 372), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': '"""localhost"""', 'port': '(6379)', 'db': '(0)'}), "(host='localhost', port=6379, db=0)\n", (337, 372), False, 'import redis\n'), ((411, 453), 'os.path.join', 'os.path.join', (['ASSET_PATH', '"""dictionary.txt"""'], {}), "(ASSET_PATH, 'dictionary.txt')\n", (423, 453), False, 'import os\n'), ((467, 491), 'boggle.dictionary.Dictionary', 'Dictionary', (['dictionaryFP'], {}), '(dictionaryFP)\n', (477, 491), False, 'from boggle.dictionary import Dictionary\n'), ((503, 544), 'os.path.join', 'os.path.join', (['ASSET_PATH', '"""TestBoard.txt"""'], {}), "(ASSET_PATH, 'TestBoard.txt')\n", (515, 544), False, 'import os\n'), ((545, 585), 'boggle.board.Board.load_from_file', 'Board.load_from_file', (['boardFP', 'redisConn'], {}), '(boardFP, redisConn)\n', (565, 585), False, 'from boggle.board import Board\n'), ((600, 642), 'boggle.boggleGame.BoggleGame', 'BoggleGame', (['dictionary', 'redisConn', 'DB_PATH'], {}), '(dictionary, redisConn, DB_PATH)\n', (610, 642), False, 'from boggle.boggleGame import BoggleGame\n'), ((800, 835), 'flask.render_template', 'render_template', (['"""boggle-game.html"""'], {}), "('boggle-game.html')\n", (815, 835), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((1026, 1065), 'flask.jsonify', 'jsonify', (["{'board_string': board_string}"], {}), "({'board_string': board_string})\n", (1033, 1065), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((1574, 1613), 'flask.jsonify', 'jsonify', (["{'score': score, 'word': word}"], {}), "({'score': score, 'word': word})\n", (1581, 1613), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((2018, 2039), 'flask.jsonify', 'jsonify', (['match_status'], 
{}), '(match_status)\n', (2025, 2039), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((2210, 2241), 'flask.jsonify', 'jsonify', (["{'match_id': match_id}"], {}), "({'match_id': match_id})\n", (2217, 2241), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((2421, 2439), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2437, 2439), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((2660, 2681), 'flask.jsonify', 'jsonify', (['match_status'], {}), '(match_status)\n', (2667, 2681), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((2967, 3028), 'flask.jsonify', 'jsonify', (["{'match_id': match_id, 'board_string': board_string}"], {}), "({'match_id': match_id, 'board_string': board_string})\n", (2974, 3028), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((1416, 1426), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (1421, 1426), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((1833, 1843), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (1838, 1843), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((2609, 2619), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2614, 2619), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n'), ((2916, 2926), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2921, 2926), False, 'from flask import Flask, jsonify, abort, request, Response, render_template\n')] |
import sys
import math
from functools import reduce


def gcd(a, b):
    """Return the non-negative greatest common divisor of a and b.

    Delegates to math.gcd, which already handles negatives and zeros
    (math.gcd(-12, 18) == 6, matching the old abs() behaviour).
    """
    return math.gcd(a, b)


def lcm(a, b):
    """Return the non-negative least common multiple of a and b."""
    return abs(a // gcd(a, b) * b)


def main():
    """Read whitespace-separated integers from stdin and return the LCM.

    The first integer is a count and is ignored; the LCM is folded over
    the remaining values.  Input is read here (not at import time) so
    importing this module has no side effects.
    """
    n, *t = map(int, sys.stdin.read().split())
    return reduce(lcm, t)


if __name__ == "__main__":
    ans = main()
    print(ans)
| [
"functools.reduce",
"sys.stdin.read"
] | [((252, 266), 'functools.reduce', 'reduce', (['lcm', 't'], {}), '(lcm, t)\n', (258, 266), False, 'from functools import reduce\n'), ((197, 213), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (211, 213), False, 'import sys\n')] |
# -*- coding:utf-8 -*-
import socket
import random
import base64
import hashlib
import re

MAGIC_NUMBER = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
HOST = "localhost"
PORT = 5858


def main():
    """Perform a raw WebSocket opening handshake against HOST:PORT.

    Returns True when the server's Sec-WebSocket-Accept matches the value
    derived from our random key per RFC 6455, False otherwise.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((HOST, PORT))

        # --- opening handshake starts ---
        nonce = bytes(random.getrandbits(8) for _ in range(16))
        sec_ws_key = base64.b64encode(nonce).decode("utf-8")
        handshake = "\r\n".join([
            "GET / HTTP/1.1",
            "Upgrade: websocket",
            f"Host: {HOST}:{PORT}",
            f"Origin: http://{HOST}:{PORT}",
            f"Sec-WebSocket-Key: {sec_ws_key}",
            "Sec-WebSocket-Version: 13",
            "Connection: upgrade",
        ]) + "\r\n\r\n"
        sock.sendall(handshake.encode("utf-8"))

        response = bytearray()
        while True:
            response += sock.recv(8)
            # the handshake response ends with a double CRLF
            if response.find(b"\r\n\r\n") >= 0:
                break

        accept_re = re.compile("Sec-WebSocket-Accept: (.+?)\r\n")
        resp_sec_ws_acpt = accept_re.search(response.decode("utf-8")).group(1)

        # expected accept value: base64(sha1(key + magic GUID))
        digest = hashlib.sha1(bytes(sec_ws_key + MAGIC_NUMBER, encoding="utf-8")).digest()
        expected = base64.b64encode(digest).decode("utf-8")

        if resp_sec_ws_acpt == expected:
            print("핸드쉐이크 성공 !!!")
        else:
            print("핸드쉐이크 실패 !!!")
            return False
        # --- opening handshake done ---

        # TODO: send and receive data frames
        return True


if "__main__" == __name__:
    main()
| [
"socket.socket",
"re.compile",
"base64.b64encode",
"random.getrandbits",
"hashlib.sha1"
] | [((199, 248), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (212, 248), False, 'import socket\n'), ((1101, 1146), 're.compile', 're.compile', (["'Sec-WebSocket-Accept: (.+?)\\r\\n'"], {}), "('Sec-WebSocket-Accept: (.+?)\\r\\n')\n", (1111, 1146), False, 'import re\n'), ((407, 428), 'random.getrandbits', 'random.getrandbits', (['(8)'], {}), '(8)\n', (425, 428), False, 'import random\n'), ((470, 495), 'base64.b64encode', 'base64.b64encode', (['raw_key'], {}), '(raw_key)\n', (486, 495), False, 'import base64\n'), ((1369, 1398), 'hashlib.sha1', 'hashlib.sha1', (['chk_sec_ws_acpt'], {}), '(chk_sec_ws_acpt)\n', (1381, 1398), False, 'import hashlib\n')] |
from copy import deepcopy
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from apex import amp
from torch.cuda.amp import autocast as autocast
from transformers import BertModel, BertTokenizer
from util import text_processing
from collections import OrderedDict
from . import ops as ops
from .config import cfg
from .lcgn import LCGN, SemanLCGN
from .input_unit import Encoder
from .output_unit import Classifier
from .optimization import *
class SingleHop(nn.Module):
    """Single-hop attention readout.

    Projects the question vector into the context space, scores every
    object against it, and returns the attention-weighted sum of the
    object features (one context vector per example).
    """

    def __init__(self):
        super().__init__()
        self.proj_q = ops.Linear(cfg.ENC_DIM, cfg.CTX_DIM)
        self.inter2att = ops.Linear(cfg.CTX_DIM, 1)

    def forward(self, kb, vecQuestions, imagesObjectNum):
        q_proj = self.proj_q(vecQuestions)
        # normalized question/object interaction features
        interactions = F.normalize(kb * q_proj[:, None, :], dim=-1)
        scores = self.inter2att(interactions).squeeze(-1)
        scores = ops.apply_mask1d(scores, imagesObjectNum)  # mask padded objects
        weights = F.softmax(scores, dim=-1)
        # weighted sum over the object axis
        return torch.bmm(weights.unsqueeze(1), kb).squeeze(1)
class LCGNnet(nn.Module):
    """End-to-end LCGN VQA model.

    Pipeline: encode question/semantics (plus BERT-derived name embeddings),
    run LCGN message passing over the object features, read out with a
    single-hop attention, and classify over the answer vocabulary.
    """

    def __init__(self, num_vocab, num_choices):
        """
        num_vocab   -- question vocabulary size (embedding table has num_vocab-1
                       rows; presumably index 0 / <pad> is excluded — TODO confirm)
        num_choices -- number of answer classes
        """
        super().__init__()
        if cfg.INIT_WRD_EMB_FROM_FILE:
            embeddingsInit = np.load(cfg.WRD_EMB_INIT_FILE)  # (num_vocab-1, WRD_EMB_DIM)
            assert embeddingsInit.shape == (num_vocab-1, cfg.WRD_EMB_DIM)
        else:
            embeddingsInit = np.random.randn(num_vocab-1, cfg.WRD_EMB_DIM)
        self.num_vocab = num_vocab  # e.g. 2957
        self.num_choices = num_choices  # e.g. 1845
        # BERT is used only offline (under no_grad) to build name embeddings.
        self.tokenizer = BertTokenizer.from_pretrained('/home/xdjf/bert_config/bert-base-uncased')
        self.model = BertModel.from_pretrained('/home/xdjf/bert_config/bert-base-uncased')
        self.name_dict = text_processing.VocabDict(cfg.VOCAB_NAME_FILE)
        name_embedding = self.reset_name_embedding()
        self.encoder = Encoder(embeddingsInit, name_embedding)
        self.lcgn = LCGN()
        #self.sema_lcgn = SemanLCGN()
        self.single_hop = SingleHop()
        self.classifier = Classifier(num_choices)
        #self.seman_encoder = ops.Linear(cfg.WRD_EMB_DIM, cfg.CMD_DIM)

    def reset_name_embedding(self):
        """Build a (num_names-1, 768) table of BERT embeddings, one row per
        name in the name vocabulary; the <unk> row stays all-zero."""
        weight = torch.zeros(self.name_dict.num_vocab - 1, 768)
        for word in self.name_dict.word_list:
            if word == '<unk>':
                continue
            temp_embedding = self.extract_name_embedding(word)
            # row index shifted by 1 because the table skips the first entry
            weight[self.name_dict.word2idx(word) - 1] = temp_embedding
        return weight

    def extract_name_embedding(self, name):
        """Run *name* through frozen BERT and return its pooled output."""
        token_name = self.tokenizer.encode(name, add_special_tokens=False)
        input_ids = torch.tensor([token_name])
        with torch.no_grad():
            _, out = self.model(input_ids)
        return out  # 1 * 768

    def forward(self, batch):
        """Forward one batch (a positional tuple/list, see indices below) and
        return predictions, loss and accuracy stats as a dict."""
        #batchSize = len(batch['image_feat_batch'])
        questionIndices = batch[0]
        questionLengths = batch[1]
        semanIndices = batch[2]
        semanLengths = batch[3]
        answerIndices = batch[4]
        nameIndices = batch[5]
        nameLengths = batch[6]
        images = batch[7]
        imagesObjectNum = batch[8]
        batchSize = images.size(0)
        # LSTM
        questionCntxWords, vecQuestions, word_seman, encode_seman, name_embed = self.encoder(
            questionIndices, questionLengths,  # e.g. 128 * 30 * 512 and 128 * 512
            semanIndices, semanLengths,
            nameIndices, nameLengths)
        # (seq, batch, dim) -> (batch, seq, dim)
        encode_seman = encode_seman.permute(1, 0, 2)
        #encode_seman = self.seman_encoder(encode_seman)
        # semanCnt = semanCnt[:, 0, :]
        # LCGN message passing over object features
        x_out = self.lcgn(
            images=images, q_encoding=vecQuestions,
            lstm_outputs=questionCntxWords, word_seman=word_seman, encode_seman=encode_seman, semanIndices=semanIndices, batch_size=batchSize,
            q_length=questionLengths, entity_num=imagesObjectNum, name_embed=name_embed, nameLengths=nameLengths)
        # x_out_seman = self.sema_lcgn(
        #     images=images, seman_outputs=semanCnt,
        #     batch_size=batchSize, entity_num=imagesObjectNum)
        # x_out = self.tensor_inter_graph_propagation(x_out, x_out_seman)
        # Single-Hop readout + answer classifier
        x_att = self.single_hop(x_out, vecQuestions, imagesObjectNum)
        logits = self.classifier(x_att, vecQuestions)  # (batch, num_choices)
        predictions, num_correct = self.add_pred_op(logits, answerIndices)
        loss = self.add_answer_loss_op(logits, answerIndices)
        return {"predictions": predictions,
                "batch_size": int(batchSize),
                "num_correct": int(num_correct),
                "loss": loss,
                "accuracy": float(num_correct * 1. / batchSize)}

    def tensor_inter_graph_propagation(self, x_out_1, x_out_2):
        """Fuse two (batch, objects, dim) feature maps via a Khatri-Rao-style
        product followed by a 1-D convolution.

        NOTE(review): self.conv1d is never defined in __init__, so calling
        this method raises AttributeError.  Its only call site in forward()
        is commented out, so the bug is currently latent.
        """
        bsz, imageNum, dModel = x_out_1.size(0), x_out_1.size(1), x_out_1.size(2)
        x_sum_1 = torch.sum(x_out_1, dim=1)
        x_sum_2 = torch.sum(x_out_2, dim=1)
        x_expand_1 = x_sum_1.repeat(1, 2)
        x_expand_2 = x_sum_2.repeat(1, 2)
        x_sum = torch.cat([x_expand_1, x_expand_2], -1)
        x_sum = x_sum.unsqueeze(1)
        x_sum = x_sum.repeat(1, imageNum, 1)
        x_union = torch.cat([x_out_1, x_out_2], dim=-1)
        x_union_expand = x_union.repeat(1, 1, 2)
        x_kr = torch.mul(x_union_expand, x_sum)
        x_kr = x_kr.view(bsz * imageNum, 4, dModel)
        x_kr = x_kr.permute(0, 2, 1)
        x_out = self.conv1d(x_kr)
        x_out = x_out.squeeze(-1)
        x_out = x_out.view(bsz, imageNum, dModel)
        return x_out

    def add_pred_op(self, logits, answers):
        """Argmax predictions (on CPU) and the number of correct answers."""
        if cfg.MASK_PADUNK_IN_LOGITS:
            logits = logits.clone()
            logits[..., :2] += -1e30  # mask <pad> and <unk>
        preds = torch.argmax(logits, dim=-1).detach()  # (batch,)
        corrects = (preds == answers)
        correctNum = torch.sum(corrects).item()
        preds = preds.cpu()  #.numpy()
        return preds, correctNum

    def add_answer_loss_op(self, logits, answers):
        """Cross-entropy or multi-label BCE loss, selected by config."""
        if cfg.TRAIN.LOSS_TYPE == "softmax":
            loss = F.cross_entropy(logits, answers)
        elif cfg.TRAIN.LOSS_TYPE == "sigmoid":
            answerDist = F.one_hot(answers, self.num_choices).float()  # (batch, num_choices)
            # scaled by num_choices so the magnitude matches the softmax loss
            loss = F.binary_cross_entropy_with_logits(
                logits, answerDist) * self.num_choices
        else:
            raise Exception("non-identified loss")
        return loss
class LCGNwrapper():
    """Training/eval wrapper around LCGNnet: owns optimizer, LR schedule,
    AMP scaling, optional EMA shadow weights and (multi-GPU) DDP."""

    def __init__(self, num_vocab, num_choices, cfg=None, rank=-1, gpu=0):
        """
        num_vocab/num_choices -- forwarded to LCGNnet
        cfg  -- config object; falls back to the module-level config when None
                (previously a None default crashed on the first cfg.* access)
        rank -- unused here (kept for caller compatibility)
        gpu  -- CUDA device index to pin the model to
        """
        if cfg is None:
            from .config import cfg  # fall back to the module-level config
        self.no_decay = ['bias', 'norm']
        torch.cuda.set_device(gpu)
        self.model = LCGNnet(num_vocab, num_choices).cuda(gpu)
        # Split params into weight-decayed and non-decayed (bias/norm) groups.
        self.trainable_params = [
            {
                "params": [p for n, p in self.model.named_parameters()
                           if p.requires_grad and not any(nd in n for nd in self.no_decay)],
                "weight_decay": cfg.weight_decay,
            },
            {
                "params": [p for n, p in self.model.named_parameters()
                           if p.requires_grad and any(nd in n for nd in self.no_decay)],
                "weight_decay": 0.0
            }
        ]
        self.optimizer = torch.optim.AdamW(
            self.trainable_params, lr=cfg.TRAIN.SOLVER.LR)
        #self.optimizer = AdamW(self.trainable_params, lr=cfg.TRAIN.SOLVER.LR, eps=cfg.adam_epsilon)
        # NOTE(review): 943000 looks like the hard-coded dataset size — confirm.
        total_step = int(943000 / cfg.n_gpus // cfg.TRAIN.BATCH_SIZE + 1) * cfg.TRAIN.MAX_EPOCH
        self.scheduler = get_cosine_schedule_with_warmup(
            self.optimizer, num_warmup_steps=cfg.warmup_steps, num_training_steps=total_step)
        if cfg.fp16:
            self.scaler = torch.cuda.amp.GradScaler()
            #self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level=cfg.fp16_opt_level)
        if cfg.n_gpus > 1:
            self.model = nn.parallel.DistributedDataParallel(
                self.model, device_ids=[gpu], output_device=gpu,
                find_unused_parameters=True)
        self.lr = cfg.TRAIN.SOLVER.LR
        self.fp16 = cfg.fp16
        self.fp16_opt_level = cfg.fp16_opt_level
        if cfg.USE_EMA:
            self.ema_param_dict = {
                name: p for name, p in self.model.named_parameters()
                if p.requires_grad}
            self.ema = ops.ExponentialMovingAverage(
                self.ema_param_dict, decay=cfg.EMA_DECAY_RATE)
            self.using_ema_params = False

    def train(self, training=True):
        """Set train/eval mode; in eval mode swap in the EMA weights."""
        self.model.train(training)
        if training:
            self.set_params_from_original()
        else:
            self.set_params_from_ema()

    def eval(self):
        self.train(False)

    def state_dict(self):
        """Checkpoint model/optimizer/EMA state.

        The dict is generated in training mode (original weights, not EMA);
        the previous mode is restored before returning.  (The restore call
        used to sit after `return` and never ran — fixed.)
        """
        current_mode = self.model.training
        self.train(True)
        assert (not cfg.USE_EMA) or (not self.using_ema_params)
        state = {
            'model': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'ema': self.ema.state_dict() if cfg.USE_EMA else None
        }
        # restore original mode
        self.train(current_mode)
        return state

    def load_state_dict(self, state_dict):
        """Load a checkpoint produced by state_dict().

        NOTE(review): k[7:] assumes every model key carries a 'module.'
        prefix (DistributedDataParallel checkpoints) — confirm for
        single-GPU checkpoints.
        """
        # Load parameters in training mode
        current_mode = self.model.training
        self.train(True)
        assert (not cfg.USE_EMA) or (not self.using_ema_params)
        new_state_dict = OrderedDict()
        for k, v in state_dict['model'].items():
            name = k[7:]
            new_state_dict[name] = v
        self.model.load_state_dict(new_state_dict)
        if 'optimizer' in state_dict:
            self.optimizer.load_state_dict(state_dict['optimizer'])
        else:
            print('Optimizer does not exist in checkpoint! '
                  'Loaded only model parameters.')
        if cfg.USE_EMA:
            if 'ema' in state_dict and state_dict['ema'] is not None:
                self.ema.load_state_dict(state_dict['ema'])
            else:
                print('cfg.USE_EMA is True, but EMA does not exist in '
                      'checkpoint! Using model params to initialize EMA.')
                self.ema.load_state_dict(
                    {k: p.data for k, p in self.ema_param_dict.items()})
        # restore original mode
        self.train(current_mode)

    def set_params_from_ema(self):
        """Swap EMA shadow weights in (saving the originals first)."""
        if (not cfg.USE_EMA) or self.using_ema_params:
            return
        self.original_state_dict = deepcopy(self.model.state_dict())
        self.ema.set_params_from_ema(self.ema_param_dict)
        self.using_ema_params = True

    def set_params_from_original(self):
        """Restore the original (non-EMA) weights."""
        if (not cfg.USE_EMA) or (not self.using_ema_params):
            return
        self.model.load_state_dict(self.original_state_dict)
        self.using_ema_params = False

    def run_batch(self, batch, train, lr=None):
        """Run one forward (and, when train=True, backward/step) pass.

        Returns the model's result dict; in training mode it also carries
        the current learning rate under 'lr'.
        """
        assert train == self.model.training
        assert (not train) or (lr is not None), 'lr must be set for training'

        if train:
            if lr != self.lr:
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = lr
                self.lr = lr
            self.optimizer.zero_grad()
            if cfg.fp16:
                with autocast():
                    batch_res = self.model.forward(batch)
            else:
                batch_res = self.model.forward(batch)
            loss = batch_res['loss']
            if self.fp16:
                self.scaler.scale(loss).backward()
                # with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                #     scaled_loss.backward()
            else:
                loss.backward()
            if cfg.TRAIN.CLIP_GRADIENTS:
                if self.fp16:
                    # gradients must be unscaled before clipping
                    self.scaler.unscale_(self.optimizer)
                    nn.utils.clip_grad_norm_(
                        self.model.parameters(), cfg.TRAIN.GRAD_MAX_NORM)
                else:
                    nn.utils.clip_grad_norm_(
                        self.model.parameters(), cfg.TRAIN.GRAD_MAX_NORM)
            if cfg.fp16:
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                self.optimizer.step()
            self.scheduler.step()
            batch_res['lr'] = self.scheduler.get_last_lr()[0]
            if cfg.USE_EMA:
                self.ema.step(self.ema_param_dict)
        else:
            with torch.no_grad():
                batch_res = self.model.forward(batch)
        return batch_res
| [
"torch.mul",
"util.text_processing.VocabDict",
"torch.sum",
"torch.bmm",
"torch.nn.functional.softmax",
"torch.cuda.amp.GradScaler",
"torch.cuda.amp.autocast",
"torch.nn.parallel.DistributedDataParallel",
"torch.argmax",
"collections.OrderedDict",
"transformers.BertModel.from_pretrained",
"tor... | [((796, 840), 'torch.nn.functional.normalize', 'F.normalize', (['(kb * proj_q[:, None, :])'], {'dim': '(-1)'}), '(kb * proj_q[:, None, :], dim=-1)\n', (807, 840), True, 'import torch.nn.functional as F\n'), ((985, 1011), 'torch.nn.functional.softmax', 'F.softmax', (['raw_att'], {'dim': '(-1)'}), '(raw_att, dim=-1)\n', (994, 1011), True, 'import torch.nn.functional as F\n'), ((1584, 1657), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""/home/xdjf/bert_config/bert-base-uncased"""'], {}), "('/home/xdjf/bert_config/bert-base-uncased')\n", (1613, 1657), False, 'from transformers import BertModel, BertTokenizer\n'), ((1679, 1748), 'transformers.BertModel.from_pretrained', 'BertModel.from_pretrained', (['"""/home/xdjf/bert_config/bert-base-uncased"""'], {}), "('/home/xdjf/bert_config/bert-base-uncased')\n", (1704, 1748), False, 'from transformers import BertModel, BertTokenizer\n'), ((1774, 1820), 'util.text_processing.VocabDict', 'text_processing.VocabDict', (['cfg.VOCAB_NAME_FILE'], {}), '(cfg.VOCAB_NAME_FILE)\n', (1799, 1820), False, 'from util import text_processing\n'), ((2217, 2263), 'torch.zeros', 'torch.zeros', (['(self.name_dict.num_vocab - 1)', '(768)'], {}), '(self.name_dict.num_vocab - 1, 768)\n', (2228, 2263), False, 'import torch\n'), ((2676, 2702), 'torch.tensor', 'torch.tensor', (['[token_name]'], {}), '([token_name])\n', (2688, 2702), False, 'import torch\n'), ((4912, 4937), 'torch.sum', 'torch.sum', (['x_out_1'], {'dim': '(1)'}), '(x_out_1, dim=1)\n', (4921, 4937), False, 'import torch\n'), ((4956, 4981), 'torch.sum', 'torch.sum', (['x_out_2'], {'dim': '(1)'}), '(x_out_2, dim=1)\n', (4965, 4981), False, 'import torch\n'), ((5084, 5123), 'torch.cat', 'torch.cat', (['[x_expand_1, x_expand_2]', '(-1)'], {}), '([x_expand_1, x_expand_2], -1)\n', (5093, 5123), False, 'import torch\n'), ((5223, 5260), 'torch.cat', 'torch.cat', (['[x_out_1, x_out_2]'], {'dim': '(-1)'}), '([x_out_1, x_out_2], dim=-1)\n', (5232, 
5260), False, 'import torch\n'), ((5326, 5358), 'torch.mul', 'torch.mul', (['x_union_expand', 'x_sum'], {}), '(x_union_expand, x_sum)\n', (5335, 5358), False, 'import torch\n'), ((6621, 6647), 'torch.cuda.set_device', 'torch.cuda.set_device', (['gpu'], {}), '(gpu)\n', (6642, 6647), False, 'import torch\n'), ((7203, 7267), 'torch.optim.AdamW', 'torch.optim.AdamW', (['self.trainable_params'], {'lr': 'cfg.TRAIN.SOLVER.LR'}), '(self.trainable_params, lr=cfg.TRAIN.SOLVER.LR)\n', (7220, 7267), False, 'import torch\n'), ((9482, 9495), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9493, 9495), False, 'from collections import OrderedDict\n'), ((1263, 1293), 'numpy.load', 'np.load', (['cfg.WRD_EMB_INIT_FILE'], {}), '(cfg.WRD_EMB_INIT_FILE)\n', (1270, 1293), True, 'import numpy as np\n'), ((1424, 1471), 'numpy.random.randn', 'np.random.randn', (['(num_vocab - 1)', 'cfg.WRD_EMB_DIM'], {}), '(num_vocab - 1, cfg.WRD_EMB_DIM)\n', (1439, 1471), True, 'import numpy as np\n'), ((2716, 2731), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2729, 2731), False, 'import torch\n'), ((6115, 6147), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'answers'], {}), '(logits, answers)\n', (6130, 6147), True, 'import torch.nn.functional as F\n'), ((7717, 7744), 'torch.cuda.amp.GradScaler', 'torch.cuda.amp.GradScaler', ([], {}), '()\n', (7742, 7744), False, 'import torch\n'), ((7913, 8030), 'torch.nn.parallel.DistributedDataParallel', 'nn.parallel.DistributedDataParallel', (['self.model'], {'device_ids': '[gpu]', 'output_device': 'gpu', 'find_unused_parameters': '(True)'}), '(self.model, device_ids=[gpu],\n output_device=gpu, find_unused_parameters=True)\n', (7948, 8030), False, 'from torch import nn\n'), ((1029, 1059), 'torch.bmm', 'torch.bmm', (['att[:, None, :]', 'kb'], {}), '(att[:, None, :], kb)\n', (1038, 1059), False, 'import torch\n'), ((5798, 5826), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (5810, 5826), 
False, 'import torch\n'), ((5901, 5920), 'torch.sum', 'torch.sum', (['corrects'], {}), '(corrects)\n', (5910, 5920), False, 'import torch\n'), ((12633, 12648), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12646, 12648), False, 'import torch\n'), ((6297, 6351), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['logits', 'answerDist'], {}), '(logits, answerDist)\n', (6331, 6351), True, 'import torch.nn.functional as F\n'), ((11331, 11341), 'torch.cuda.amp.autocast', 'autocast', ([], {}), '()\n', (11339, 11341), True, 'from torch.cuda.amp import autocast as autocast\n'), ((6220, 6256), 'torch.nn.functional.one_hot', 'F.one_hot', (['answers', 'self.num_choices'], {}), '(answers, self.num_choices)\n', (6229, 6256), True, 'import torch.nn.functional as F\n')] |
from vkapp.bot.models import Income, Payment, Blogger, News
from .usersDAO import get_or_create_blogger
def new_income_proposal(amount, news):
    """Record a proposal income for a news item and credit its blogger.

    The blogger's balance is updated and saved before the income row is
    persisted, matching the original ordering.
    """
    income = Income(amount=amount, news=news, type=Income.PROPOSAL)
    author = news.blogger
    author.balance += amount
    author.save()
    income.save()
def re_count_balance(uid):
    """Recompute a blogger's balance from scratch and persist it.

    Balance = sum of all income amounts tied to the blogger's news minus
    the sum of all payments made to the blogger.  Returns the new balance.

    uid -- VK user id identifying the blogger
    """
    blogger = get_or_create_blogger(uid)
    incomes = Income.objects.filter(news__blogger__vk_user__vk_id=uid).select_related()
    payments = Payment.objects.filter(blogger=blogger)
    # sum() over generators replaces the two manual accumulation loops;
    # empty querysets contribute 0, as before.
    new_balance = (sum(income.amount for income in incomes)
                   - sum(payment.amount for payment in payments))
    blogger.balance = new_balance
    blogger.save()
    return new_balance
| [
"vkapp.bot.models.Income.objects.filter",
"vkapp.bot.models.Income",
"vkapp.bot.models.Payment.objects.filter"
] | [((157, 211), 'vkapp.bot.models.Income', 'Income', ([], {'amount': 'amount', 'news': 'news', 'type': 'Income.PROPOSAL'}), '(amount=amount, news=news, type=Income.PROPOSAL)\n', (163, 211), False, 'from vkapp.bot.models import Income, Payment, Blogger, News\n'), ((563, 602), 'vkapp.bot.models.Payment.objects.filter', 'Payment.objects.filter', ([], {'blogger': 'blogger'}), '(blogger=blogger)\n', (585, 602), False, 'from vkapp.bot.models import Income, Payment, Blogger, News\n'), ((390, 446), 'vkapp.bot.models.Income.objects.filter', 'Income.objects.filter', ([], {'news__blogger__vk_user__vk_id': 'uid'}), '(news__blogger__vk_user__vk_id=uid)\n', (411, 446), False, 'from vkapp.bot.models import Income, Payment, Blogger, News\n')] |
from django.shortcuts import render
from django.http import JsonResponse

from .services import logic


def about(request):
    """Render the static about page."""
    return render(request, 'service/about.html')


def info_pokemon(request, name):
    """Return the named pokemon's data as JSON, or a not-found message."""
    pokemon = logic.get_pokemon(name)
    if not pokemon:
        return JsonResponse({"Respuesta": "Pokémon no encontrado"})
    return JsonResponse(pokemon)
| [
"django.shortcuts.render",
"django.http.JsonResponse"
] | [((134, 171), 'django.shortcuts.render', 'render', (['request', '"""service/about.html"""'], {}), "(request, 'service/about.html')\n", (140, 171), False, 'from django.shortcuts import render\n'), ((276, 297), 'django.http.JsonResponse', 'JsonResponse', (['pokemon'], {}), '(pokemon)\n', (288, 297), False, 'from django.http import JsonResponse\n'), ((323, 375), 'django.http.JsonResponse', 'JsonResponse', (["{'Respuesta': 'Pokémon no encontrado'}"], {}), "({'Respuesta': 'Pokémon no encontrado'})\n", (335, 375), False, 'from django.http import JsonResponse\n')] |
'''Gets info about the users whose tweets are retrieved'''
import json
import boto3

client = boto3.client("dynamodb")
table_name = 'arduino_twitter_users'


def lambda_handler(event, context):
    """Lambda entry point: single-user lookup when 'user_id' is present,
    otherwise a user-id listing."""
    if 'user_id' in event:
        return get_user(event['user_id'])
    return get_users()


def get_user(user_id: str) -> dict:
    """Fetch one user record by id.

    statusCode: 200 for exactly one match, 404 for none, 504 when the id
    is (unexpectedly) duplicated; body carries the raw DynamoDB item or None.
    """
    user_resp = client.query(
        TableName=table_name,
        KeyConditionExpression='id = :id',
        ExpressionAttributeValues={
            ':id': {'S': user_id}
        }
    )
    resp_items = user_resp['Items']
    statusCode = 200 if len(resp_items) == 1 else 404 if len(resp_items) == 0 else 504
    user = resp_items[0] if len(resp_items) == 1 else None
    return {
        'statusCode': statusCode,
        'body': user
    }


def get_users():
    """Return up to 10 user ids from the users table."""
    # use the shared table_name constant (was a duplicated string literal)
    resp = client.scan(TableName=table_name, Limit=10)
    return {
        'statusCode': 200,
        'body': [e['id']['S'] for e in resp['Items']]
    }
| [
"boto3.client"
] | [((94, 118), 'boto3.client', 'boto3.client', (['"""dynamodb"""'], {}), "('dynamodb')\n", (106, 118), False, 'import boto3\n')] |
# Automation
# Specifically used for small subsets with int64 as their astype
import pandas as pd

my_df = pd.read_csv("subset-1-sous-ensemble-1.csv", encoding="latin-1")

# Keep only Q01 rows from the 2020 survey, then the two tally columns.
my_df = my_df.loc[my_df['QUESTION'] == 'Q01']
my_df = my_df.loc[my_df['SURVEYR'] == 2020]
my_df = my_df.iloc[0:, [20, 22]]
print(my_df)

count = my_df['MOST_POSITIVE_OR_LEAST_NEGATIVE'].count()
print('Count: ' + str(count))

my_df = my_df.astype({"MOST_POSITIVE_OR_LEAST_NEGATIVE": "int64",
                      "MOST_NEGATIVE_OR_LEAST_POSITIVE": "int64"}, copy=False)

# Averages per column.  (Two bare `my_df.sum()[...]` expressions that
# computed these sums and discarded the results were removed.)
average_mpln = my_df.sum()["MOST_POSITIVE_OR_LEAST_NEGATIVE"] / count
average_mnlp = my_df.sum()["MOST_NEGATIVE_OR_LEAST_POSITIVE"] / count
print('Average for MOST_POSITIVE_OR_LEAST_NEGATIVE: ' + str(average_mpln))
print('Average for MOST_NEGATIVE_OR_LEAST_POSITIVE: ' + str(average_mnlp))
| [
"pandas.read_csv"
] | [((106, 169), 'pandas.read_csv', 'pd.read_csv', (['"""subset-1-sous-ensemble-1.csv"""'], {'encoding': '"""latin-1"""'}), "('subset-1-sous-ensemble-1.csv', encoding='latin-1')\n", (117, 169), True, 'import pandas as pd\n')] |
""" This file is create and managed by <NAME>
----------------------------------------------
It can be use only for education purpose
"""
# Splitting String into List items
import re
language = "Python, Java Script, C#, Kotlin"
language_list = re.split(',',language)
print(language_list)
| [
"re.split"
] | [((257, 280), 're.split', 're.split', (['""","""', 'language'], {}), "(',', language)\n", (265, 280), False, 'import re\n')] |
"""
Class to crawl answers mail.ru
"""
import sqlite3
import requests
import re
from bs4 import BeautifulSoup as bs
class Crawler(object):
def __init__(self, categories='all', timeline = 'all', verbose=True,
schema_name='schema.sql', db_name='q_database.sqlt',
bs_features='lxml'):
"""
init method for Crawler
:params:
categories -- (list) -- categories that should be downloaded
-- default val:'all' -- downloads all questions
timeline -- (tuple of timestamp)-- download from timeline[0] to timeline[1]
-- default val:'all' -- downloads all questions
verbose -- (bool) -- if program should output progress
schema_name-- (str) -- name of sql file that describes
structure of database
-- default val:'schema.sql'
db_name -- (str) -- name of database
-- default val:'q_database.sqlt'
bs_features-- (str) -- BeautifulSoup engine to parse html page
Look up https://www.crummy.com/software/BeautifulSoup/bs4/doc/ *Installing parser* section
It explains things about parsers
In short, if something goes wrong, change to 'html.parser'
-- deafult val:'lxml'
"""
self.categories = categories
self.timeline = timeline
self.verbose = verbose
self.schema_name = schema_name
self.db_name = db_name
self.bs_features=bs_features
self.__mail_page = 'https://otvet.mail.ru'
self.__exclude = ['Золотой фонд', 'О проектах Mail.Ru', 'Другое']
self.__reg_q_number = re.compile('[\d]+')
def __get_cats2sql(self, cats):
"""Stupid (dog) fuction to prepare data for sql"""
if self.categories != 'all':
return [(str(j), #id; autoincrement
'\'' + itm.text + '\'', #name
'\'' + itm['href'] + '\'') #link
for j, itm in enumerate(cats)
if itm.text in self.categories
and itm.text not in self.__exclude]
else:
return [(str(j), #id; autoincrement
'\'' + itm.text + '\'', #name
'\'' + itm['href'] + '\'') #link
for j, itm in enumerate(cats)
if itm.text not in self.__exclude]
def __get_subcats2sql(self, cats, i, parent_name, start_id):
"""Stupid (dog) fuction to prepare data for sql
i -- id of parent category
"""
if self.categories != 'all':
return [(str(start_id + j), #id; autoincrement
str(i), #parent_id
'\'' + itm.text + '\'', #name
'\'' + itm['href'] + '\'') #link
for j, itm in enumerate(cats)
if itm.text in self.categories
and itm.text not in self.__exclude
and parent_name not in self.__exclude
and itm.text not in self.parent_cats]
else:
return [(str(start_id + j), #id; autoincrement
str(i), #parent_id
'\'' + itm.text + '\'', #name
'\'' + itm['href'] + '\'') #link
for j, itm in enumerate(cats)
if itm.text not in self.__exclude
and parent_name not in self.__exclude
and itm.text not in self.parent_cats]
def __fetch_latest_question_id(self):
"""
Loads main page of `otvet.mail.ru` and gets `id` of latest question.
Then sets it to `self.latest_question` and returns this values
"""
page = self.get_page(params=['/open/'])
soup = bs(page, self.bs_features)
latest_q = soup.find('a', 'blue item__text')
self.latest_question = self.__reg_q_number.search(latest_q['href']).group(0)
return self.latest_question
def __is_valid_page(self, soup):
"""Checks if page contains 'Вопрос не найден' """
# TODO: add time constrains
content = soup.find('div', 'b-page__content')
if content:
if content.text == 'Вопрос не найден..':
return False
else:
category = soup.find('a', 'black list__title list__title').text.strip()
if category not in self.__exclude:
if self.categories == 'all' or category in self.categories:
return True
return False
def __select_id_from(self, table, like):
like_s = like.strip()
c = self.db.cursor()
query = c.execute('SELECT `id` FROM {} \
WHERE `name` LIKE \'{}\''.format(table, like_s))
category_id = query.fetchone()[0]
self.db.commit()
return category_id
def __get_selected_category_and_sub(self, soup):
category = soup.find('a', 'black list__title list__title')
sub_category = soup.find('a', 'medium item item_link selected')
cat_id = self.__select_id_from('categories', category.text)
if sub_category:
sub_cat_id = self.__select_id_from('sub_categories', sub_category.text)
else:
sub_cat_id = None
return cat_id, sub_cat_id
def get_db(self):
"""Returns database if exist or creates one and returns it"""
if not hasattr(self, 'db'):
self.db = sqlite3.connect(self.db_name)
self.db.row_factory = sqlite3.Row
return self.db
def init_db(self):
"""Initilizes database with sql file"""
self.get_db()
with open(self.schema_name, 'r') as f:
self.db.executescript(f.read())
self.db.commit()
def close_db(self):
"""Closes connection to database"""
if hasattr(self, 'db'):
self.db.close()
def get_page(self, params=None):
"""
Gets page with url self.__mail_page + params.
params usually would be ['questions', question_id]
:returns: string of page or None if 404 or something
"""
if params:
url = self.__mail_page + ''.join(params)
else:
url = self.__mail_page
r = requests.get(url)
if r.status_code == 200:
return r.text
else:
return None
def add_to_database(self, table, items):
"""Add tuples from *items* to *table*"""
try:
c = self.db.cursor()
for item in items:
item_for_db = ', '.join(item)
print(item_for_db)
c.execute('INSERT INTO {t} VALUES({i})'.format(t=table, i=item_for_db))
self.db.commit()
except:
raise sqlite3.Error('Unable to insert items into {}'.format(table))
def get_categories(self, page=None):
"""
Downloads parent categories
:param: page -- (list) -- should be either ['/name_of_categry/'] to get subcategories
or None to get parent categories
-- default val:None
:returns: (list) -- list of <a>...</a> with names of categories and links to them
"""
# getting main page
text_page = self.get_page(page)
soup = bs(text_page, self.bs_features)
# searching for categories
categories = soup.find_all('a', 'medium item item_link')
# adding categories to db and return list
return categories
def add_categories_to_db(self):
"""
Downloads categories and subcategories and saves them to database
"""
categories = self.get_categories()
# itm looks like this: <a class="medium item item_link" href="/autosport/" name="">Автоспорт</a>,
# so we are getting text = Автоспорт and 'href' = /autosport/
cats2sql = self.__get_cats2sql(categories)
self.add_to_database(table='categories', items=cats2sql)
self.parent_cats = [cat.text for cat in categories]
sub2sql = []
j = 0
for i, c in enumerate(categories):
par_name = c.text
href = c['href']
sub_categories = self.get_categories(page=href)
sub2sql.extend(self.__get_subcats2sql(sub_categories, i, par_name, j))
j += len(sub_categories)
self.add_to_database(table='sub_categories',
items=sub2sql)
def get_latest_question_id(self):
"""Gets latest_question from database. If there is None, fetch one from web."""
c = self.db.cursor()
resp = c.execute('SELECT max(`id`) FROM questions')
latest_q = resp.fetchone()
self.db.commit()
if latest_q:
self.latest_question = latest_q[0]
return latest_q[0]
else:
return self.__fetch_latest_question_id()
def fetch_pages(self, from_id, to_id):
"""
Genrator for pages. Yields soup object only if page exists and valid.
:params:
from_id -- (int) -- Number of question to start from
to_id -- (int) -- Number of last question
:yields:
(page_id, BeautifulSoup object) tuple
"""
for p_id in range(from_id, to_id):
page = self.get_page(['/question/', '{}/'.format(p_id)])
# if error 404, get_page returns None
if page:
# Checking if page contains "Вопрос не найден"
soup = bs(page, self.bs_features)
if self.__is_valid_page(soup):
yield(p_id, soup)
def retrieve_data(self, soup_page):
"""
Gets tuples of relevant data from BeautifulSoup parsed page
:params:
soup_page -- (str) -- BeautifulSoup parsed page
:returns:
tuple of title, category_id, sub_category_id, comment_from_author, answers
"""
title = soup_page.find('h1', 'q--qtext').text
cat_id, sub_cat_id = self.__get_selected_category_and_sub(soup_page)
raw_comments = soup_page.find_all('div', 'q--qcomment medium')
if raw_comments:
comments = ' '.join([q.text for q in raw_comments])
else:
comments = None
raw_answers = soup_page.find_all('div', 'a--atext atext')
if raw_answers:
answers = [a.text for a in raw_answers]
else:
answers = None
return title, cat_id, sub_cat_id, comments, answers
def download_all_questions(self):
for i, page in self.fetch_pages(0, 10):
title, cat_id, sub_cat_id, text, answers = self.retrieve_data(page)
c = self.db.cursor()
q_4_db = (str(i), str(cat_id), str(sub_cat_id), str(title), str(text))
c.execute('INSERT INTO questions VALUES(?, ?, ?, ?, ?)', q_4_db)
for a in answers:
a_4_db = (str(i), str(a))
c.execute('INSERT INTO answers(`question_id`, `a_text`) VALUES(?, ?)', a_4_db)
| [
"bs4.BeautifulSoup",
"sqlite3.connect",
"requests.get",
"re.compile"
] | [((1885, 1905), 're.compile', 're.compile', (['"""[\\\\d]+"""'], {}), "('[\\\\d]+')\n", (1895, 1905), False, 'import re\n'), ((4186, 4212), 'bs4.BeautifulSoup', 'bs', (['page', 'self.bs_features'], {}), '(page, self.bs_features)\n', (4188, 4212), True, 'from bs4 import BeautifulSoup as bs\n'), ((6691, 6708), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6703, 6708), False, 'import requests\n'), ((7761, 7792), 'bs4.BeautifulSoup', 'bs', (['text_page', 'self.bs_features'], {}), '(text_page, self.bs_features)\n', (7763, 7792), True, 'from bs4 import BeautifulSoup as bs\n'), ((5882, 5911), 'sqlite3.connect', 'sqlite3.connect', (['self.db_name'], {}), '(self.db_name)\n', (5897, 5911), False, 'import sqlite3\n'), ((9980, 10006), 'bs4.BeautifulSoup', 'bs', (['page', 'self.bs_features'], {}), '(page, self.bs_features)\n', (9982, 10006), True, 'from bs4 import BeautifulSoup as bs\n')] |
#!/usr/bin/env python
import math
import random
import time
class MinMaxHeap:
    """Array-backed binary heap that works as a max-heap or a min-heap.

    The direction is fixed at construction time via ``is_max``. enqueue and
    dequeue are O(log n); peek is O(1).
    """

    @staticmethod
    def get_max_nodes_for_height(height):
        """Number of nodes in a perfect binary tree of the given height."""
        return 2 ** (height + 1) - 1

    def __init__(self, is_max=True):
        """Create an empty heap; max-heap by default, min-heap when is_max=False."""
        self._items = []
        self._is_max = is_max

    def __len__(self):
        return len(self._items)

    # FIXME: Not pretty printing a non-complete tree
    def __str__(self):
        """Best-effort multi-line rendering of the heap as a tree."""
        try:
            output = []
            output.append("Heap: {}".format(self._items))
            height = self.get_height()
            output.append("Height: {}".format(height))
            max_nodes_for_height = MinMaxHeap.get_max_nodes_for_height(height)
            if len(self._items) > 0:
                # Slice the flat array into one list per tree level.
                total_print_count = 1
                cur_line_count = 2
                lines = [[self._items[0]]]
                while total_print_count < len(self._items):
                    to_print = []
                    while len(to_print) < cur_line_count and total_print_count < len(self._items):
                        to_print.append(self._items[total_print_count])
                        total_print_count += 1
                    lines.append(to_print)
                    cur_line_count *= 2
                output.append("Levels: {}".format(lines))
                output.append('')
                # Width of the bottom row: each node needs "{}" plus a space.
                base_line_length = max_nodes_for_height * (1 + 2) - 1
                base_line = ' ' * base_line_length
                for i in range(len(lines)):
                    str_to_print = list(base_line)
                    elem_spacing = base_line_length // len(lines[i])
                    idx = 0
                    pos = 0
                    # Lay out "{}" placeholders evenly across the line.
                    while idx < len(lines[i]):
                        str_to_print[pos] = '{'
                        str_to_print[pos + 1] = '}'
                        idx += 1
                        pos += elem_spacing + 1
                    # Center the row by shifting half the trailing space left.
                    last_pos = pos + 1 - elem_spacing
                    post_space_count = len(str_to_print) - last_pos - 1
                    if post_space_count > 0:
                        str_to_print = [' ' for j in range(
                            post_space_count // 2)] + str_to_print[:-(post_space_count // 2)]
                    joint = "".join(str_to_print)
                    output.append(joint.format(*lines[i]))
            return "\n".join(output)
        except Exception:
            # Dump whatever was rendered so far to ease debugging, then re-raise.
            print("\n".join(output))
            raise

    def __heapify_up(self, compare_method):
        """Sift the last item up while compare_method(child, parent) holds."""
        idx = len(self._items) - 1
        parent_idx = self._get_parent_index(idx)
        while parent_idx is not None and compare_method(self._items[idx], self._items[parent_idx]):
            self._swap(idx, parent_idx)
            idx = parent_idx
            parent_idx = self._get_parent_index(idx)

    def __heapify_down(self, compare_method):
        """Sift the root down until the heap property holds again.

        compare_method(a, b) is True when *a* must sit below *b*
        (a < b for a max-heap, a > b for a min-heap).
        """
        idx = 0
        while True:
            left = self._get_left_child_index(idx)
            right = self._get_right_child_index(idx)
            if left is None:
                break  # reached a leaf
            # Pick the child that should be highest in the heap.
            child = left
            if right is not None and compare_method(self._items[child], self._items[right]):
                child = right
            # BUG FIX: the old code swapped unconditionally all the way down
            # to a leaf, which could push an already-valid parent below its
            # children and corrupt the heap. Stop once the property holds.
            if not compare_method(self._items[idx], self._items[child]):
                break
            self._swap(idx, child)
            idx = child

    def _swap(self, idxA, idxB):
        """Exchange the items at the two indices."""
        self._items[idxA], self._items[idxB] = self._items[idxB], self._items[idxA]

    def _get_parent_index(self, idx):
        """Parent index, or None for the root."""
        parent_idx = (idx - 1) // 2
        if parent_idx < 0:
            return None
        return parent_idx

    def _get_left_child_index(self, idx):
        """Left child index, or None if out of range."""
        left_idx = idx * 2 + 1
        if left_idx >= len(self._items):
            return None
        return left_idx

    def _get_right_child_index(self, idx):
        """Right child index, or None if out of range."""
        right_idx = idx * 2 + 2
        if right_idx >= len(self._items):
            return None
        return right_idx

    def _heapify_up(self):
        """Restore the heap property after an append at the end."""
        if self._is_max:
            self.__heapify_up(lambda a, b: a > b)
        else:
            self.__heapify_up(lambda a, b: a < b)

    def _heapify_down(self):
        """Restore the heap property after replacing the root."""
        if self._is_max:
            self.__heapify_down(lambda a, b: a < b)
        else:
            self.__heapify_down(lambda a, b: a > b)

    def get_height(self):
        """Tree height for the current size (-1 for an empty heap)."""
        return int(math.ceil(math.log(len(self._items) + 1, 2) - 1))

    def peek(self):
        """Return the root without removing it. Raises IndexError if empty."""
        return self._items[0]

    def enqueue(self, item):
        """Insert *item* and restore the heap property."""
        self._items.append(item)
        self._heapify_up()

    def dequeue(self):
        """Remove and return the root (max or min). Raises IndexError if empty."""
        head = self._items[0]
        last = self._items.pop()
        if self._items:
            self._items[0] = last
            self._heapify_down()
        return head
if __name__ == "__main__":
    random.seed(time.time())
    max_gen_height = 4

    def run_round(heap):
        """Fill the heap with random values, show it, pop once, show again."""
        item_count = random.randint(1, 2 * (2 ** max_gen_height) - 1)
        for _ in range(item_count):
            heap.enqueue(random.randint(0, 100))
        print(heap)
        print('\n')
        removed = heap.dequeue()
        print(f"Removed: {removed}, now queue is:")
        print(heap)
        print('\n')
        print('\n')

    print("# MaxHeap Tests")
    for round_no in range(30):
        print(f"## MaxHeap - Round {round_no}")
        run_round(MinMaxHeap())
    print("# MinHeap Tests")
    for round_no in range(30):
        print(f"## MinHeap - Round {round_no}")
        run_round(MinMaxHeap(False))
"time.time",
"random.randint"
] | [((5373, 5384), 'time.time', 'time.time', ([], {}), '()\n', (5382, 5384), False, 'import time\n'), ((5466, 5512), 'random.randint', 'random.randint', (['(1)', '(2 * 2 ** max_gen_height - 1)'], {}), '(1, 2 * 2 ** max_gen_height - 1)\n', (5480, 5512), False, 'import random\n'), ((5542, 5564), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (5556, 5564), False, 'import random\n')] |
from django.conf.urls import include, url
# Mount the morango REST API under the /api/morango/v1/ prefix.
urlpatterns = [
    url(r'^api/morango/v1/', include('morango.api.urls')),
]
| [
"django.conf.urls.include"
] | [((89, 116), 'django.conf.urls.include', 'include', (['"""morango.api.urls"""'], {}), "('morango.api.urls')\n", (96, 116), False, 'from django.conf.urls import include, url\n')] |
#!/usr/bin/env python
# Copyright (c) 2010 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for Robot's DatabaseLibrary distributions"""
from distutils.core import setup
import setuptools
import sys
import os
# Make the bundled sources importable and read the library version string.
src_path = os.path.join(os.path.dirname(__file__), 'src')
sys.path.insert(0, src_path)
__version_file_path__ = os.path.join(src_path, 'DatabaseLibrary', 'VERSION')
# Use a context manager so the VERSION file handle is closed deterministically
# (the old open(...).read() relied on garbage collection to close it).
with open(__version_file_path__, 'r') as version_file:
    __version__ = version_file.read().strip()
def main():
    """Register the DatabaseLibrary distribution with distutils."""
    setup(
        name='robotframework-databaselibrary',
        version=__version__,
        description='Database utility library for Robot Framework',
        author='<NAME>',
        author_email='<EMAIL>',
        url='https://github.com/franz-see/Robotframework-Database-Library',
        package_dir={'': 'src'},
        packages=['DatabaseLibrary'],
        package_data={'DatabaseLibrary': ['VERSION']},
        requires=['robotframework'],
    )


if __name__ == "__main__":
    main()
| [
"os.path.dirname",
"sys.path.insert",
"os.path.join",
"distutils.core.setup"
] | [((803, 831), 'sys.path.insert', 'sys.path.insert', (['(0)', 'src_path'], {}), '(0, src_path)\n', (818, 831), False, 'import sys\n'), ((857, 909), 'os.path.join', 'os.path.join', (['src_path', '"""DatabaseLibrary"""', '"""VERSION"""'], {}), "(src_path, 'DatabaseLibrary', 'VERSION')\n", (869, 909), False, 'import os\n'), ((769, 794), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (784, 794), False, 'import os\n'), ((989, 1377), 'distutils.core.setup', 'setup', ([], {'name': '"""robotframework-databaselibrary"""', 'version': '__version__', 'description': '"""Database utility library for Robot Framework"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/franz-see/Robotframework-Database-Library"""', 'package_dir': "{'': 'src'}", 'packages': "['DatabaseLibrary']", 'package_data': "{'DatabaseLibrary': ['VERSION']}", 'requires': "['robotframework']"}), "(name='robotframework-databaselibrary', version=__version__,\n description='Database utility library for Robot Framework', author=\n '<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/franz-see/Robotframework-Database-Library',\n package_dir={'': 'src'}, packages=['DatabaseLibrary'], package_data={\n 'DatabaseLibrary': ['VERSION']}, requires=['robotframework'])\n", (994, 1377), False, 'from distutils.core import setup\n')] |
from flask_restplus import Resource, marshal, abort
from flask_login import login_required, current_user
from webserver.api import commit_or_abort, profile
from webserver.models import db, Project
from webserver import logger
from .namespace import (
api,
project,
project_base,
project_list
)
@api.route('/')
class Projects(Resource):
    """Collection endpoint: list the current user's projects, create new ones."""

    @login_required
    @api.marshal_with(project_list)
    def get(self):
        """ Returns a list of users projects """
        # First page only; page size 100, hard-capped at 500.
        return current_user.projects.paginate(1, per_page=100, max_per_page=500)

    @login_required
    @api.expect(project_base)
    @api.marshal_with(project)
    def post(self):
        """ Creates new project """
        with commit_or_abort(error_message='Operation failed. Could not create project.'):
            new_project = Project(**api.payload)
            current_user.projects.append(new_project)
            db.session.add(new_project)
        return new_project
@api.route('/<int:project_id>')
class ProjectId(Resource):
    """Item endpoint for a single project belonging to the current user."""

    @login_required
    @api.marshal_with(project)
    def get(self, project_id):
        """ Gets projects """
        return current_user.projects.get_or_404(project_id)

    @login_required
    @api.marshal_with(project)
    def put(self, project_id):
        """ Updates projects """
        # TODO: update is not implemented yet; kept returning False so the
        # route keeps responding as before.
        return False

    @login_required
    @api.marshal_with(project)
    def delete(self, project_id):
        """ Deletes projects """
        with commit_or_abort(error_message='Operation failed. Could not delete project'):
            # BUG FIX: look the project up through the user's projects
            # relationship, as `get` does; `current_user.get_or_404` queried
            # the wrong object and bypassed the ownership scope.
            project_model = current_user.projects.get_or_404(project_id)
            project_model.delete()
        return project_model
| [
"flask_login.current_user.projects.append",
"flask_login.current_user.get_or_404",
"webserver.models.Project",
"flask_login.current_user.projects.get_or_404",
"webserver.models.db.session.add",
"webserver.api.commit_or_abort",
"flask_login.current_user.projects.paginate"
] | [((496, 561), 'flask_login.current_user.projects.paginate', 'current_user.projects.paginate', (['(1)'], {'per_page': '(100)', 'max_per_page': '(500)'}), '(1, per_page=100, max_per_page=500)\n', (526, 561), False, 'from flask_login import login_required, current_user\n'), ((1180, 1224), 'flask_login.current_user.projects.get_or_404', 'current_user.projects.get_or_404', (['project_id'], {}), '(project_id)\n', (1212, 1224), False, 'from flask_login import login_required, current_user\n'), ((741, 817), 'webserver.api.commit_or_abort', 'commit_or_abort', ([], {'error_message': '"""Operation failed. Could not create project."""'}), "(error_message='Operation failed. Could not create project.')\n", (756, 817), False, 'from webserver.api import commit_or_abort, profile\n'), ((847, 862), 'webserver.models.Project', 'Project', ([], {}), '(**args)\n', (854, 862), False, 'from webserver.models import db, Project\n'), ((875, 918), 'flask_login.current_user.projects.append', 'current_user.projects.append', (['project_model'], {}), '(project_model)\n', (903, 918), False, 'from flask_login import login_required, current_user\n'), ((931, 960), 'webserver.models.db.session.add', 'db.session.add', (['project_model'], {}), '(project_model)\n', (945, 960), False, 'from webserver.models import db, Project\n'), ((1494, 1569), 'webserver.api.commit_or_abort', 'commit_or_abort', ([], {'error_message': '"""Operation failed. Could not delete project"""'}), "(error_message='Operation failed. Could not delete project')\n", (1509, 1569), False, 'from webserver.api import commit_or_abort, profile\n'), ((1599, 1634), 'flask_login.current_user.get_or_404', 'current_user.get_or_404', (['project_id'], {}), '(project_id)\n', (1622, 1634), False, 'from flask_login import login_required, current_user\n')] |
# SPDX-FileCopyrightText: 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
import gzip
import hashlib
import logging
import subprocess
import sys
from pathlib import Path
def uncompress_gds(project_path, caravel_root):
    """Run the caravel Makefile's `uncompress` target inside *project_path*.

    Exits the process with status 252 when the make invocation fails.
    """
    cmd = f"make -f {caravel_root}/Makefile uncompress;"
    try:
        logging.info(f"{{{{EXTRACTING FILES}}}} Extracting compressed files in: {project_path}")
        # check=True is required for CalledProcessError to ever be raised;
        # without it a failing `make` was silently ignored and the except
        # branch below was dead code.
        subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
                       shell=True, cwd=str(project_path), check=True)
    except subprocess.CalledProcessError as error:
        logging.info(f"{{{{EXTRACTING FILES ERROR}}}} Make 'uncompress' Error: {error}")
        sys.exit(252)
def is_binary_file(filename):
    """Return True when the file's last suffix looks like GDS or gzip data."""
    return any(marker in Path(filename).suffix for marker in ('gds', 'gz'))
def is_not_binary_file(filename):
    """Return True when *filename* is not a GDS/gzip artifact (negation of is_binary_file)."""
    return not is_binary_file(filename)
def file_hash(filename):
    """Return the SHA-1 hex digest of *filename*'s content.

    Gzip-compressed files (detected via the 1f 8b magic bytes) are hashed
    on their decompressed content, so file.gds and file.gds.gz yield the
    same digest. File handles are managed with `with` blocks; the previous
    version leaked both the magic-byte probe handle and, on a read error,
    the data handle.
    """
    def is_compressed(path):
        # gzip magic number probe
        with open(path, 'rb') as probe:
            return probe.read(2) == b'\x1f\x8b'

    BSIZE = 65536
    sha1 = hashlib.sha1()
    opener = gzip.open if is_compressed(filename) else open
    with opener(filename, 'rb') as f:
        while True:
            data = f.read(BSIZE)
            if not data:
                break
            sha1.update(data)
    return sha1.hexdigest()
def get_project_config(project_path, caravel_root):
    """Build the precheck configuration dict for a user project.

    The project type is decided by which wrapper GDS exists under
    <project>/gds: exactly one of user_analog_project_wrapper.gds (analog)
    or user_project_wrapper.gds (digital) must be present; otherwise the
    process exits with status 254.
    """
    analog_gds = project_path / 'gds/user_analog_project_wrapper.gds'
    digital_gds = project_path / 'gds/user_project_wrapper.gds'
    if analog_gds.exists() and not digital_gds.exists():
        return {
            'type': 'analog',
            'netlist_type': 'spice',
            'top_module': 'caravan',
            'user_module': 'user_analog_project_wrapper',
            'golden_wrapper': 'user_analog_project_wrapper_empty',
            'top_netlist': caravel_root / "spi/lvs/caravan.spice",
            'user_netlist': project_path / "netgen/user_analog_project_wrapper.spice",
        }
    if digital_gds.exists() and not analog_gds.exists():
        return {
            'type': 'digital',
            'netlist_type': 'verilog',
            'top_module': 'caravel',
            'user_module': 'user_project_wrapper',
            'golden_wrapper': 'user_project_wrapper_empty',
            'top_netlist': caravel_root / "verilog/gl/caravel.v",
            'user_netlist': project_path / "verilog/gl/user_project_wrapper.v",
        }
    logging.fatal("{{IDENTIFYING PROJECT TYPE FAILED}} A single valid GDS was not found. "
                  "If your project is digital, a GDS file should exist under the project's 'gds' directory named 'user_project_wrapper(.gds/.gds.gz)'. "
                  "If your project is analog, a GDS file should exist under the project's 'gds' directory named 'user_analog_project_wrapper(.gds/.gds.gz)'.")
    sys.exit(254)
| [
"gzip.open",
"pathlib.Path",
"logging.fatal",
"sys.exit",
"hashlib.sha1",
"logging.info"
] | [((1609, 1623), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (1621, 1623), False, 'import hashlib\n'), ((858, 951), 'logging.info', 'logging.info', (['f"""{{{{EXTRACTING FILES}}}} Extracting compressed files in: {project_path}"""'], {}), "(\n f'{{{{EXTRACTING FILES}}}} Extracting compressed files in: {project_path}')\n", (870, 951), False, 'import logging\n'), ((1274, 1288), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (1278, 1288), False, 'from pathlib import Path\n'), ((1632, 1657), 'gzip.open', 'gzip.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (1641, 1657), False, 'import gzip\n'), ((1117, 1202), 'logging.info', 'logging.info', (['f"""{{{{EXTRACTING FILES ERROR}}}} Make \'uncompress\' Error: {error}"""'], {}), '(f"{{{{EXTRACTING FILES ERROR}}}} Make \'uncompress\' Error: {error}"\n )\n', (1129, 1202), False, 'import logging\n'), ((1206, 1219), 'sys.exit', 'sys.exit', (['(252)'], {}), '(252)\n', (1214, 1219), False, 'import sys\n'), ((3155, 3521), 'logging.fatal', 'logging.fatal', (['"""{{IDENTIFYING PROJECT TYPE FAILED}} A single valid GDS was not found. If your project is digital, a GDS file should exist under the project\'s \'gds\' directory named \'user_project_wrapper(.gds/.gds.gz)\'. If your project is analog, a GDS file should exist under the project\'s \'gds\' directory named \'user_analog_project_wrapper(.gds/.gds.gz)\'."""'], {}), '(\n "{{IDENTIFYING PROJECT TYPE FAILED}} A single valid GDS was not found. If your project is digital, a GDS file should exist under the project\'s \'gds\' directory named \'user_project_wrapper(.gds/.gds.gz)\'. If your project is analog, a GDS file should exist under the project\'s \'gds\' directory named \'user_analog_project_wrapper(.gds/.gds.gz)\'."\n )\n', (3168, 3521), False, 'import logging\n'), ((3570, 3583), 'sys.exit', 'sys.exit', (['(254)'], {}), '(254)\n', (3578, 3583), False, 'import sys\n')] |
# mimic_ts.py
# NOTE: will not be used
# Converts raw MIMIC time-series data to the standardized format
# Input: the MIMIC-III database in CSV format (pointed to by INPUT_PATH)
# Output: ts.csv
# TODO: cohort selection
# TODO: urine output
import pandas as pd
# TODO: change to actual (non-demo) data path
INPUT_PATH = 'datasets/mimic_demo/'
OUTPUT_PATH = 'datasets/mimic_demo_output/'

# Dictionary where keys are Carevue and Metavision names for desired chart
# variables and values are the column names for the output dataset
# (note there are two source patient monitoring systems)
CHART_VARS = {
    'Heart Rate': 'hr',
    'Respiratory Rate': 'rr',
    'Arterial Blood Pressure mean': 'bp', 'Arterial BP Mean': 'bp',
    'Temperature Fahrenheit': 'temp', 'Temperature F': 'temp',
    'O2 saturation pulseoxymetry': 'spo2', 'SpO2': 'spo2',
    'Inspired O2 Fraction': 'fio2', 'FiO2 Set': 'fio2'}

# Dictionary where keys are MIMIC names for desired lab variables and values
# are the column names for the output dataset
LAB_VARS = {
    'Urea Nitrogen': 'bun',
    'Creatinine': 'creatinine',
    'Glucose': 'glucose',
    'Bicarbonate': 'bicarbonate',
    'Hematocrit': 'hct',
    'Lactate': 'lactate',
    'Magnesium': 'magnesium',
    'Platelet Count': 'platelet',
    'Potassium': 'potassium',
    'Sodium': 'sodium',
    'White Blood Cells': 'wbc'}

# Load the raw MIMIC tables and their item dictionaries.
chartevents = pd.read_csv(INPUT_PATH + 'CHARTEVENTS.csv')
labevents = pd.read_csv(INPUT_PATH + 'LABEVENTS.csv')
d_items = pd.read_csv(INPUT_PATH + 'D_ITEMS.csv')
d_labitems = pd.read_csv(INPUT_PATH + 'D_LABITEMS.csv')

# Merge chart data, keeping only desired variables.
chart_merge = chartevents.merge(d_items, how='inner', on='ITEMID')
# Sanity check: every chart event must match exactly one item definition.
assert len(chart_merge) == len(chartevents)
chart_merge.columns = chart_merge.columns.str.lower()
chart_merge['time'] = pd.to_datetime(chart_merge['charttime'])
chart_merge = chart_merge.loc[chart_merge['label'].isin(CHART_VARS),
                              ['subject_id', 'label', 'valuenum', 'time']]
# Standardize column names and value formats across Carevue and Metavision.
# 'FiO2 Set' is scaled by 100, presumably fraction -> percent to match
# 'Inspired O2 Fraction' -- TODO confirm units against the source data.
chart_merge.loc[chart_merge['label'] == 'FiO2 Set', 'valuenum'] *= 100
chart_merge['label'] = chart_merge['label'].map(CHART_VARS)

# Same for lab data.
lab_merge = labevents.merge(d_labitems, how='inner', on='ITEMID')
assert len(lab_merge) == len(labevents)
lab_merge.columns = lab_merge.columns.str.lower()
lab_merge['time'] = pd.to_datetime(lab_merge['charttime'])
lab_merge = lab_merge.loc[lab_merge['label'].isin(LAB_VARS),
                          ['subject_id', 'label', 'valuenum', 'time']]
lab_merge['label'] = lab_merge['label'].map(LAB_VARS)

# Reshape data to wide format. pd.concat replaces DataFrame.append, which
# was deprecated in pandas 1.4 and removed in 2.0.
ts = pd.concat([chart_merge, lab_merge])
ts = ts.pivot_table(values='valuenum', columns='label',
                    index=['subject_id', 'time'])
ts.to_csv(OUTPUT_PATH + 'ts.csv')
| [
"pandas.to_datetime",
"pandas.read_csv"
] | [((1383, 1426), 'pandas.read_csv', 'pd.read_csv', (["(INPUT_PATH + 'CHARTEVENTS.csv')"], {}), "(INPUT_PATH + 'CHARTEVENTS.csv')\n", (1394, 1426), True, 'import pandas as pd\n'), ((1441, 1482), 'pandas.read_csv', 'pd.read_csv', (["(INPUT_PATH + 'LABEVENTS.csv')"], {}), "(INPUT_PATH + 'LABEVENTS.csv')\n", (1452, 1482), True, 'import pandas as pd\n'), ((1497, 1536), 'pandas.read_csv', 'pd.read_csv', (["(INPUT_PATH + 'D_ITEMS.csv')"], {}), "(INPUT_PATH + 'D_ITEMS.csv')\n", (1508, 1536), True, 'import pandas as pd\n'), ((1551, 1593), 'pandas.read_csv', 'pd.read_csv', (["(INPUT_PATH + 'D_LABITEMS.csv')"], {}), "(INPUT_PATH + 'D_LABITEMS.csv')\n", (1562, 1593), True, 'import pandas as pd\n'), ((1833, 1873), 'pandas.to_datetime', 'pd.to_datetime', (["chart_merge['charttime']"], {}), "(chart_merge['charttime'])\n", (1847, 1873), True, 'import pandas as pd\n'), ((2422, 2460), 'pandas.to_datetime', 'pd.to_datetime', (["lab_merge['charttime']"], {}), "(lab_merge['charttime'])\n", (2436, 2460), True, 'import pandas as pd\n')] |
#########################################
############ Kawaii Commands ############
#########################################
import discord
import random
from config import embed_color, embed_color_attention, embed_color_error, embed_color_succes
from discord.ext import commands
from cogs.data.kawaiidata import *
class Kawaii():
    """Cog with playful interaction and reaction commands.

    Two-person commands (hug, pat, hit, ...) mention both the invoking
    author and a target member (the bot itself when nobody is mentioned);
    single-person reaction commands (blush, dance, ...) mention only the
    author. Every command replies with an embed carrying a random gif from
    the matching pool imported from cogs.data.kawaiidata.

    The 15 near-identical command bodies are factored onto two private
    helpers; each command keeps its original name, signature, decorators
    and reply text.
    """
    def __init__(self, bot):
        self.bot = bot

    async def _send_gif(self, ctx, description, gif_pool):
        """Reply with an embed: *description* text plus a random gif from *gif_pool*."""
        embed = discord.Embed(description=description, color=embed_color)
        embed.set_image(url=f"{random.choice(gif_pool)}")
        await ctx.send(embed=embed)

    async def _interaction(self, ctx, member, template, gif_pool):
        """Two-person command body; the bot is the target when no member is given.

        *template* is formatted with {member} and {author} display names.
        """
        if not member:
            member = self.bot.user
        description = template.format(member=member.name, author=ctx.author.name)
        await self._send_gif(ctx, description, gif_pool)

    async def _reaction(self, ctx, template, gif_pool):
        """Single-person command body; *template* is formatted with {author}."""
        await self._send_gif(ctx, template.format(author=ctx.author.name), gif_pool)

    #hug command (-hug [@mention])
    @commands.guild_only()
    @commands.command()
    async def hug(self, ctx, *, member : discord.Member = None):
        await self._interaction(ctx, member, "**{member}** you got hugged by **{author}**", hugs)

    #poke command (-poke [@mention])
    @commands.guild_only()
    @commands.command()
    async def poke(self, ctx, *, member : discord.Member = None):
        await self._interaction(ctx, member, "**{member}** you got poked by **{author}**", pokes)

    #gg command (-gg [@mention]) -- comment was previously mislabeled as -love
    @commands.guild_only()
    @commands.command()
    async def gg(self, ctx, *, member : discord.Member = None):
        await self._interaction(ctx, member, "**{author}** said: GG **{member}**", gg)

    #love command (-love [@mention])
    @commands.guild_only()
    @commands.command()
    async def love(self, ctx, *, member : discord.Member = None):
        await self._interaction(ctx, member, "**{author}** loves **{member}**", love)

    #wave command (-wave [@mention])
    @commands.guild_only()
    @commands.command()
    async def wave(self, ctx, *, member : discord.Member = None):
        await self._interaction(ctx, member, "**{author}** waves at you **{member}**", waves)

    #hide command (-hide [@mention])
    @commands.guild_only()
    @commands.command()
    async def hide(self, ctx, *, member : discord.Member = None):
        await self._interaction(ctx, member, "**{author}** is hiding for **{member}**", hides)

    #pat command (-pat [@mention])
    @commands.guild_only()
    @commands.command()
    async def pat(self, ctx, *, member : discord.Member = None):
        await self._interaction(ctx, member, "**{member}** you got a pat from **{author}**", pats)

    #hit command (-hit [@mention])
    @commands.guild_only()
    @commands.command()
    async def hit(self, ctx, *, member : discord.Member = None):
        await self._interaction(ctx, member, "**{member}** got hit by **{author}**", hits)

    #blush command (-blush)
    @commands.guild_only()
    @commands.command()
    async def blush(self, ctx):
        await self._reaction(ctx, "**{author}** is blushing!", blush)

    #shine command (-shine)
    @commands.guild_only()
    @commands.command()
    async def shine(self, ctx):
        await self._reaction(ctx, "**{author}** is shining!", shines)

    #happy command (-happy)
    @commands.guild_only()
    @commands.command()
    async def happy(self, ctx):
        await self._reaction(ctx, "**{author}** feels happy!", happy)

    #angry command (-angry)
    @commands.guild_only()
    @commands.command()
    async def angry(self, ctx):
        await self._reaction(ctx, "**{author}** feels angry!", angry)

    #sad command (-sad)
    @commands.guild_only()
    @commands.command()
    async def sad(self, ctx):
        await self._reaction(ctx, "**{author}** feels sad!", sad)

    #dance command (-dance)
    @commands.guild_only()
    @commands.command()
    async def dance(self, ctx):
        await self._reaction(ctx, "**{author}** is cheerfully dancing!", dancing)

    #Not Kawaii but still really needed!
    #dab command (-dab)
    @commands.guild_only()
    @commands.command()
    async def dab(self, ctx):
        await self._reaction(ctx, "**{author}** is dabbing!", dabs)
def setup(bot):
    """discord.py extension hook: attach the Kawaii cog to *bot*."""
    cog = Kawaii(bot)
    bot.add_cog(cog)
| [
"discord.ext.commands.guild_only",
"discord.Embed",
"discord.ext.commands.command",
"random.choice"
] | [((423, 444), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (442, 444), False, 'from discord.ext import commands\n'), ((450, 468), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (466, 468), False, 'from discord.ext import commands\n'), ((902, 923), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (921, 923), False, 'from discord.ext import commands\n'), ((929, 947), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (945, 947), False, 'from discord.ext import commands\n'), ((1382, 1403), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (1401, 1403), False, 'from discord.ext import commands\n'), ((1409, 1427), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1425, 1427), False, 'from discord.ext import commands\n'), ((1849, 1870), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (1868, 1870), False, 'from discord.ext import commands\n'), ((1876, 1894), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1892, 1894), False, 'from discord.ext import commands\n'), ((2317, 2338), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (2336, 2338), False, 'from discord.ext import commands\n'), ((2344, 2362), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2360, 2362), False, 'from discord.ext import commands\n'), ((2793, 2814), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (2812, 2814), False, 'from discord.ext import commands\n'), ((2820, 2838), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2836, 2838), False, 'from discord.ext import commands\n'), ((3268, 3289), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (3287, 3289), False, 'from discord.ext import commands\n'), ((3295, 3313), 'discord.ext.commands.command', 'commands.command', ([], {}), 
'()\n', (3311, 3313), False, 'from discord.ext import commands\n'), ((3746, 3767), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (3765, 3767), False, 'from discord.ext import commands\n'), ((3773, 3791), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (3789, 3791), False, 'from discord.ext import commands\n'), ((4209, 4230), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (4228, 4230), False, 'from discord.ext import commands\n'), ((4236, 4254), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (4252, 4254), False, 'from discord.ext import commands\n'), ((4541, 4562), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (4560, 4562), False, 'from discord.ext import commands\n'), ((4568, 4586), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (4584, 4586), False, 'from discord.ext import commands\n'), ((4873, 4894), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (4892, 4894), False, 'from discord.ext import commands\n'), ((4900, 4918), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (4916, 4918), False, 'from discord.ext import commands\n'), ((5205, 5226), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (5224, 5226), False, 'from discord.ext import commands\n'), ((5232, 5250), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (5248, 5250), False, 'from discord.ext import commands\n'), ((5533, 5554), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (5552, 5554), False, 'from discord.ext import commands\n'), ((5560, 5578), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (5576, 5578), False, 'from discord.ext import commands\n'), ((5861, 5882), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (5880, 5882), False, 'from discord.ext import commands\n'), 
((5888, 5906), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (5904, 5906), False, 'from discord.ext import commands\n'), ((6238, 6259), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (6257, 6259), False, 'from discord.ext import commands\n'), ((6265, 6283), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (6281, 6283), False, 'from discord.ext import commands\n'), ((639, 658), 'random.choice', 'random.choice', (['hugs'], {}), '(hugs)\n', (652, 658), False, 'import random\n'), ((675, 787), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{member.name}** you got hugged by **{author.name}**"""', 'color': 'embed_color'}), "(description=\n f'**{member.name}** you got hugged by **{author.name}**', color=embed_color\n )\n", (688, 787), False, 'import discord\n'), ((1119, 1139), 'random.choice', 'random.choice', (['pokes'], {}), '(pokes)\n', (1132, 1139), False, 'import random\n'), ((1156, 1262), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{member.name}** you got poked by **{author.name}**"""', 'color': 'embed_color'}), "(description=\n f'**{member.name}** you got poked by **{author.name}**', color=embed_color)\n", (1169, 1262), False, 'import discord\n'), ((1597, 1614), 'random.choice', 'random.choice', (['gg'], {}), '(gg)\n', (1610, 1614), False, 'import random\n'), ((1631, 1728), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{author.name}** said: GG **{member.name}**"""', 'color': 'embed_color'}), "(description=f'**{author.name}** said: GG **{member.name}**',\n color=embed_color)\n", (1644, 1728), False, 'import discord\n'), ((2066, 2085), 'random.choice', 'random.choice', (['love'], {}), '(love)\n', (2079, 2085), False, 'import random\n'), ((2102, 2196), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{author.name}** loves **{member.name}**"""', 'color': 'embed_color'}), "(description=f'**{author.name}** loves **{member.name}**',\n 
color=embed_color)\n", (2115, 2196), False, 'import discord\n'), ((2534, 2554), 'random.choice', 'random.choice', (['waves'], {}), '(waves)\n', (2547, 2554), False, 'import random\n'), ((2571, 2673), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{author.name}** waves at you **{member.name}**"""', 'color': 'embed_color'}), "(description=\n f'**{author.name}** waves at you **{member.name}**', color=embed_color)\n", (2584, 2673), False, 'import discord\n'), ((3010, 3030), 'random.choice', 'random.choice', (['hides'], {}), '(hides)\n', (3023, 3030), False, 'import random\n'), ((3047, 3150), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{author.name}** is hiding for **{member.name}**"""', 'color': 'embed_color'}), "(description=\n f'**{author.name}** is hiding for **{member.name}**', color=embed_color)\n", (3060, 3150), False, 'import discord\n'), ((3484, 3503), 'random.choice', 'random.choice', (['pats'], {}), '(pats)\n', (3497, 3503), False, 'import random\n'), ((3520, 3633), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{member.name}** you got a pat from **{author.name}**"""', 'color': 'embed_color'}), "(description=\n f'**{member.name}** you got a pat from **{author.name}**', color=\n embed_color)\n", (3533, 3633), False, 'import discord\n'), ((3962, 3981), 'random.choice', 'random.choice', (['hits'], {}), '(hits)\n', (3975, 3981), False, 'import random\n'), ((3998, 4097), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{member.name}** got hit by **{author.name}**"""', 'color': 'embed_color'}), "(description=f'**{member.name}** got hit by **{author.name}**',\n color=embed_color)\n", (4011, 4097), False, 'import discord\n'), ((4305, 4325), 'random.choice', 'random.choice', (['blush'], {}), '(blush)\n', (4318, 4325), False, 'import random\n'), ((4342, 4430), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{ctx.author.name}** is blushing!"""', 'color': 'embed_color'}), 
"(description=f'**{ctx.author.name}** is blushing!', color=\n embed_color)\n", (4355, 4430), False, 'import discord\n'), ((4637, 4658), 'random.choice', 'random.choice', (['shines'], {}), '(shines)\n', (4650, 4658), False, 'import random\n'), ((4675, 4762), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{ctx.author.name}** is shining!"""', 'color': 'embed_color'}), "(description=f'**{ctx.author.name}** is shining!', color=\n embed_color)\n", (4688, 4762), False, 'import discord\n'), ((4969, 4989), 'random.choice', 'random.choice', (['happy'], {}), '(happy)\n', (4982, 4989), False, 'import random\n'), ((5006, 5094), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{ctx.author.name}** feels happy!"""', 'color': 'embed_color'}), "(description=f'**{ctx.author.name}** feels happy!', color=\n embed_color)\n", (5019, 5094), False, 'import discord\n'), ((5301, 5321), 'random.choice', 'random.choice', (['angry'], {}), '(angry)\n', (5314, 5321), False, 'import random\n'), ((5338, 5426), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{ctx.author.name}** feels angry!"""', 'color': 'embed_color'}), "(description=f'**{ctx.author.name}** feels angry!', color=\n embed_color)\n", (5351, 5426), False, 'import discord\n'), ((5627, 5645), 'random.choice', 'random.choice', (['sad'], {}), '(sad)\n', (5640, 5645), False, 'import random\n'), ((5662, 5748), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{ctx.author.name}** feels sad!"""', 'color': 'embed_color'}), "(description=f'**{ctx.author.name}** feels sad!', color=\n embed_color)\n", (5675, 5748), False, 'import discord\n'), ((5957, 5979), 'random.choice', 'random.choice', (['dancing'], {}), '(dancing)\n', (5970, 5979), False, 'import random\n'), ((5996, 6093), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{ctx.author.name}** is cheerfully dancing!"""', 'color': 'embed_color'}), "(description=f'**{ctx.author.name}** is cheerfully dancing!',\n 
color=embed_color)\n", (6009, 6093), False, 'import discord\n'), ((6332, 6351), 'random.choice', 'random.choice', (['dabs'], {}), '(dabs)\n', (6345, 6351), False, 'import random\n'), ((6368, 6455), 'discord.Embed', 'discord.Embed', ([], {'description': 'f"""**{ctx.author.name}** is dabbing!"""', 'color': 'embed_color'}), "(description=f'**{ctx.author.name}** is dabbing!', color=\n embed_color)\n", (6381, 6455), False, 'import discord\n')] |
import os
import csv
from configparser import ConfigParser
from rom import ROM
from patch import Patch
__PROJECT_CONFIG_FILE_NAME = 'project.ini'
__CONFIG = None
def build_rom(buf=None):
    """Apply every enabled patch from the manifest to the base ROM.

    Args:
        buf: Unused; kept for backward compatibility with existing callers.

    Returns:
        The patched ROM object.
    """
    config = get_config()
    patch_list = get_patch_list()
    ff6 = ROM.from_file(config['base_rom'])
    # Iterate the list we already loaded.  The original called
    # get_patch_list() a second time here, re-parsing the manifest CSV and
    # re-reading every patch file for no benefit.
    for n, patch in enumerate(patch_list):
        if not patch.apply:
            continue
        print('Applying patch {} ({}/{})'.format(
            patch.file_name,
            n + 1,
            len(patch_list),
        ))
        ff6.apply_patch(patch)
    return ff6
def get_patch_list():
    """Read the patch manifest CSV and return a list of Patch objects."""
    config = get_config()
    manifest_path = config['patch_list']
    patches = []
    with open(manifest_path, 'r') as manifest:
        for entry in csv.DictReader(manifest):
            patch = Patch.from_file(
                os.path.join(config['patch_dir'], entry['file_name']),
                entry['header'] == 'y',
                entry['apply'] == 'y',
                entry['notes']
            )
            patches.append(patch)
    return patches
def save_patch_list(patch_list):
    """Write patch metadata back out to the manifest CSV (overwrites it)."""
    manifest_path = get_config()['patch_list']
    with open(manifest_path, 'w', newline='') as manifest:
        writer = csv.writer(manifest)
        writer.writerow(('file_name', 'header', 'apply', 'notes'))
        for patch in patch_list:
            writer.writerow((
                patch.file_name,
                'y' if patch.header else 'n',
                'y' if patch.apply else 'n',
                patch.notes
            ))
def get_config():
    """Parse project.ini once and memoize its [project] section as a dict."""
    global __CONFIG
    if __CONFIG is None:
        parser = ConfigParser()
        parser.read(__PROJECT_CONFIG_FILE_NAME)
        __CONFIG = {key: parser['project'][key] for key in parser['project']}
    return __CONFIG
# vi: et sw=4 ts=4 tw=79
| [
"csv.DictReader",
"configparser.ConfigParser",
"csv.writer",
"os.path.join",
"rom.ROM.from_file"
] | [((261, 294), 'rom.ROM.from_file', 'ROM.from_file', (["config['base_rom']"], {}), "(config['base_rom'])\n", (274, 294), False, 'from rom import ROM\n'), ((767, 791), 'csv.DictReader', 'csv.DictReader', (['csv_fobj'], {}), '(csv_fobj)\n', (781, 791), False, 'import csv\n'), ((1241, 1261), 'csv.writer', 'csv.writer', (['csv_fobj'], {}), '(csv_fobj)\n', (1251, 1261), False, 'import csv\n'), ((1638, 1652), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1650, 1652), False, 'from configparser import ConfigParser\n'), ((856, 907), 'os.path.join', 'os.path.join', (["config['patch_dir']", "row['file_name']"], {}), "(config['patch_dir'], row['file_name'])\n", (868, 907), False, 'import os\n')] |
import sys
import os
sys.path.append(os.path.abspath("./"))
from nnst import downloader as downloader
import pprint
import argparse
import nnst.nnst as nnst
# CLI: download news items, build train/test splits, and print a sample batch.
parser = argparse.ArgumentParser()
parser.add_argument('--csv_path', help='csv파일 경로')
parser.add_argument('--date', help='시작할 뉴스 일자')
parser.add_argument('--num', help='파싱할 뉴스 개수')
parser.add_argument('--num_train', help='트레이닝셋 사이즈')

# Defaults used when the corresponding flag is omitted.
csv_path = 'csv/NNST_data.csv'
date = '20180914'
num = 1000
num_train = 900

print(parser.format_help())
# vars() is the documented way to turn a Namespace into a dict
# (instead of reaching into .__dict__ directly).
args = vars(parser.parse_args())
if args['csv_path'] is not None:
    csv_path = str(args['csv_path'])
if args['date'] is not None:
    date = str(args['date'])
if args['num'] is not None:
    num = int(args['num'])
if args['num_train'] is not None:
    num_train = int(args['num_train'])

downloader.download(num, csv_path, date)
data = nnst.load_data(csv_path)
train, test = nnst.div_dataset(data, train_size=num_train)
print('------train set------')
pprint.pprint(train)
print('---------------------\n')
print('------test set------')
pprint.pprint(test)
print('---------------------\n')
batch = nnst.random_batch(train, batch_size=100)
print('------batch set------')
pprint.pprint(batch)
print('---------------------')
| [
"nnst.nnst.load_data",
"argparse.ArgumentParser",
"nnst.downloader.download",
"nnst.nnst.random_batch",
"os.path.abspath",
"pprint.pprint",
"nnst.nnst.div_dataset"
] | [((165, 190), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (188, 190), False, 'import argparse\n'), ((791, 831), 'nnst.downloader.download', 'downloader.download', (['num', 'csv_path', 'date'], {}), '(num, csv_path, date)\n', (810, 831), True, 'from nnst import downloader as downloader\n'), ((839, 863), 'nnst.nnst.load_data', 'nnst.load_data', (['csv_path'], {}), '(csv_path)\n', (853, 863), True, 'import nnst.nnst as nnst\n'), ((879, 923), 'nnst.nnst.div_dataset', 'nnst.div_dataset', (['data'], {'train_size': 'num_train'}), '(data, train_size=num_train)\n', (895, 923), True, 'import nnst.nnst as nnst\n'), ((956, 976), 'pprint.pprint', 'pprint.pprint', (['train'], {}), '(train)\n', (969, 976), False, 'import pprint\n'), ((1041, 1060), 'pprint.pprint', 'pprint.pprint', (['test'], {}), '(test)\n', (1054, 1060), False, 'import pprint\n'), ((1103, 1143), 'nnst.nnst.random_batch', 'nnst.random_batch', (['train'], {'batch_size': '(100)'}), '(train, batch_size=100)\n', (1120, 1143), True, 'import nnst.nnst as nnst\n'), ((1174, 1194), 'pprint.pprint', 'pprint.pprint', (['batch'], {}), '(batch)\n', (1187, 1194), False, 'import pprint\n'), ((37, 58), 'os.path.abspath', 'os.path.abspath', (['"""./"""'], {}), "('./')\n", (52, 58), False, 'import os\n')] |
from openpyxl import load_workbook
from .cell import Cell
wb = load_workbook('test.xlsx')
sheet = wb.active
cell = sheet['H4']
mycell = Cell(cell)
| [
"openpyxl.load_workbook"
] | [((65, 91), 'openpyxl.load_workbook', 'load_workbook', (['"""test.xlsx"""'], {}), "('test.xlsx')\n", (78, 91), False, 'from openpyxl import load_workbook\n')] |
"""Exploratory Data Analysis. This module creates the values for Table 1 (summary statistics)."""
# %%
# Import necessary packages
import os
import numpy as np
from pathlib import Path
# %%
# Set up folder path
code_folder = Path(os.path.abspath(''))
print(code_folder)
project_dir = os.path.dirname(code_folder)
os.chdir(project_dir)
print(project_dir)
# %%
# Import the (mostly) cleaned and formatted data
from setup_fin_dataset import get_dataset
from setup_fin_dataset import OBS_DATASET
# %%
df = get_dataset()
# %%
# Inspect the frame's dimensions and available columns.
print(df.shape)
print(df.columns)
# %%
'''A bit of cleaning
'''
# The negative TNFI codes (-3/-2/-1) are survey non-responses; map them to NaN.
df['TNFI_TRUNC'] = df['TNFI_TRUNC'].replace([-3, -2, -1], np.nan)
# %%
# Drop respondents missing any aptitude/attitude score.
df.dropna(axis=0, how='any', subset=['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)
df.shape
# %%
# Restrict to the 1978 survey wave.
df2 = df.loc[df['SURVEY_YEAR'].eq(1978)]
df2.shape
# %%
# Double-check that no null scores survived the drop.
df2[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].isnull().sum()
# %%
# Age distribution of the 1978 respondents.
df_age = df2.groupby('AGE')['IDENTIFIER'].nunique().sort_values(ascending=False)
df_age
# %%
'''SUMMARY STATISTICS TABLE
'''
# Summary statistics by age group
# Ages 14-17 (filter written as 13 <= AGE <= 17, matching the original bounds)
df_1978_a = df.loc[df['SURVEY_YEAR'].eq(1978) & df['AGE'].between(13, 17)]
df_1978_a[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
# Ages 18-22
df_1978_b = df.loc[df['SURVEY_YEAR'].eq(1978) & df['AGE'].between(18, 22)]
df_1978_b[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
# Summary statistics by gender
df_male = df2.loc[df2['GENDER'].eq(1)]
df_male[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
df_female = df2.loc[df2['GENDER'].eq(2)]
df_female[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
# Summary statistics by race
df_hispanic = df2.loc[df2['RACE'].eq(1)]
df_hispanic[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
df_black = df2.loc[df2['RACE'].eq(2)]
df_black[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
df_non_hb = df2.loc[df2['RACE'].eq(3)]
df_non_hb[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
# Summary statistics by family income quartile
df_q1 = df2.loc[df2['FAMILY_INCOME_QUARTILE'].eq('first quartile')]
df_q1[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
df_q2 = df2.loc[df2['FAMILY_INCOME_QUARTILE'].eq('second quartile')]
df_q2[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
df_q3 = df2.loc[df2['FAMILY_INCOME_QUARTILE'].eq('third quartile')]
df_q3[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
df_q4 = df2.loc[df2['FAMILY_INCOME_QUARTILE'].eq('fourth quartile')]
df_q4[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
# Parental education: distribution of mother's highest completed grade
df_mom_edu = df2.groupby('HIGHEST_GRADE_COMPLETED_MOTHER')['IDENTIFIER'].nunique().sort_values(ascending=False)
df_mom_edu
# %%
# Mothers with at least a high-school education (grade >= 12)
df_mom_edu = df2.loc[df2['HIGHEST_GRADE_COMPLETED_MOTHER'].ge(12)]
df_mom_edu[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
df_mom_nedu = df2.loc[df2['HIGHEST_GRADE_COMPLETED_MOTHER'].le(11)]
df_mom_nedu[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
df_dad_edu = df2.loc[df2['HIGHEST_GRADE_COMPLETED_FATHER'].ge(12)]
df_dad_edu[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
# %%
df_dad_nedu = df2.loc[df2['HIGHEST_GRADE_COMPLETED_FATHER'].le(11)]
df_dad_nedu[['AFQT_1', 'ROSENBERG_SCORE', 'ROTTER_SCORE']].describe()
'''Re-construct income quartiles to get the range within each.
'''
# %%
print(OBS_DATASET)
# %%
# Construct family income quartile variable
trunc_data = OBS_DATASET.loc[OBS_DATASET['SURVEY_YEAR'] == 1978, ['TNFI_TRUNC']].dropna()
# %%
# BUG FIX: DataFrame.replace returns a *new* frame; the original called it
# three times and discarded every result, so the -3/-2/-1 non-response codes
# were still present in describe() below.  Assign the cleaned frame back.
trunc_data = trunc_data.replace([-3, -2, -1], np.nan)
# %%
trunc_data.describe()
# %%
| [
"os.chdir",
"os.path.dirname",
"os.path.abspath",
"setup_fin_dataset.get_dataset"
] | [((287, 315), 'os.path.dirname', 'os.path.dirname', (['code_folder'], {}), '(code_folder)\n', (302, 315), False, 'import os\n'), ((316, 337), 'os.chdir', 'os.chdir', (['project_dir'], {}), '(project_dir)\n', (324, 337), False, 'import os\n'), ((508, 521), 'setup_fin_dataset.get_dataset', 'get_dataset', ([], {}), '()\n', (519, 521), False, 'from setup_fin_dataset import get_dataset\n'), ((233, 252), 'os.path.abspath', 'os.path.abspath', (['""""""'], {}), "('')\n", (248, 252), False, 'import os\n')] |
# encoding: utf-8
from __future__ import unicode_literals
from datetime import timedelta
from bson import ObjectId as oid
from common import FieldExam
from marrow.mongo import Document
from marrow.mongo.field import ObjectId
from marrow.mongo.util import utcnow
from marrow.schema.compat import unicode
class TestObjectIdField(FieldExam):
	"""Casting behaviour of the ObjectId field for the common input types."""

	__field__ = ObjectId

	def test_id_default(self):
		# A field named '_id' gets an ObjectId generated automatically.
		class Sample(Document):
			id = ObjectId('_id')

		assert isinstance(Sample().id, oid)

	def test_cast_string(self, Sample):
		doc = Sample('5832223f927cc6c1a10609f7')
		assert isinstance(doc.__data__['field'], oid)
		assert unicode(doc.field) == '5832223f927cc6c1a10609f7'

	def test_cast_oid(self, Sample):
		value = oid()
		# An existing ObjectId is stored as-is (same object, no copy).
		assert Sample(value).__data__['field'] is value

	def test_cast_datetime(self, Sample):
		moment = utcnow()
		doc = Sample(moment)
		assert isinstance(doc.__data__['field'], oid)
		assert doc.field.generation_time == moment

	def test_cast_timedelta(self, Sample):
		offset = -timedelta(days=7)
		expected = utcnow() + offset
		doc = Sample(offset)
		# A timedelta is interpreted relative to "now".
		assert isinstance(doc.__data__['field'], oid)
		assert doc.field.generation_time == expected

	def test_cast_document(self, Sample):
		payload = {'_id': oid()}
		assert Sample(payload).field == payload['_id']
| [
"bson.ObjectId",
"marrow.mongo.util.utcnow",
"marrow.mongo.field.ObjectId",
"marrow.schema.compat.unicode",
"datetime.timedelta"
] | [((724, 729), 'bson.ObjectId', 'oid', ([], {}), '()\n', (727, 729), True, 'from bson import ObjectId as oid\n'), ((836, 844), 'marrow.mongo.util.utcnow', 'utcnow', ([], {}), '()\n', (842, 844), False, 'from marrow.mongo.util import utcnow\n'), ((431, 446), 'marrow.mongo.field.ObjectId', 'ObjectId', (['"""_id"""'], {}), "('_id')\n", (439, 446), False, 'from marrow.mongo.field import ObjectId\n'), ((632, 651), 'marrow.schema.compat.unicode', 'unicode', (['inst.field'], {}), '(inst.field)\n', (639, 651), False, 'from marrow.schema.compat import unicode\n'), ((1006, 1023), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (1015, 1023), False, 'from datetime import timedelta\n'), ((1031, 1039), 'marrow.mongo.util.utcnow', 'utcnow', ([], {}), '()\n', (1037, 1039), False, 'from marrow.mongo.util import utcnow\n'), ((1212, 1217), 'bson.ObjectId', 'oid', ([], {}), '()\n', (1215, 1217), True, 'from bson import ObjectId as oid\n')] |
import pytest
import aos_version
from collections import namedtuple
Package = namedtuple('Package', ['name', 'version'])

# Shared fixture for the tests below: spam and eggs both pinned at 3.2.1,
# with multi-release checking disabled.
expected_pkgs = {
    name: {"name": name, "version": "3.2.1", "check_multi": False}
    for name in ("spam", "eggs")
}
@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
    (
        # all found
        [Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
        expected_pkgs,
    ),
    (
        # found with more specific version
        [Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
        expected_pkgs,
    ),
    (
        # one of several allowed versions present (extra versions are fine)
        [Package('ovs', '2.6'), Package('ovs', '2.4')],
        {
            "ovs": {
                "name": "ovs",
                "version": ["2.6", "2.7"],
                "check_multi": False,
            }
        },
    ),
    (
        # exactly one allowed version present
        [Package('ovs', '2.7')],
        {
            "ovs": {
                "name": "ovs",
                "version": ["2.6", "2.7"],
                "check_multi": False,
            }
        },
    ),
])
def test_check_precise_version_found(pkgs, expected_pkgs_dict):
    """Positive cases: every expected package is matched by an installed
    version, so the check must return without raising."""
    aos_version._check_precise_version_found(pkgs, expected_pkgs_dict)
@pytest.mark.parametrize('pkgs,expect_not_found', [
    (
        [],
        {
            "spam": {
                "name": "spam",
                "version": "3.2.1",
                "check_multi": False,
            },
            "eggs": {
                "name": "eggs",
                "version": "3.2.1",
                "check_multi": False,
            }
        },  # none found
    ),
    (
        [Package('spam', '3.2.1')],
        {
            "eggs": {
                "name": "eggs",
                "version": "3.2.1",
                "check_multi": False,
            }
        },  # completely missing
    ),
    (
        [Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
        {
            "eggs": {
                "name": "eggs",
                "version": "3.2.1",
                "check_multi": False,
            }
        },  # not the right version
    ),
    (
        [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
        {
            "spam": {
                "name": "spam",
                "version": "3.2.1",
                "check_multi": False,
            }
        },  # eggs found with multiple versions
    ),
])
def test_check_precise_version_found_fail(pkgs, expect_not_found):
    """Negative cases: PreciseVersionNotFound must be raised and must carry
    exactly the expected problem packages (here compared in dict order)."""
    with pytest.raises(aos_version.PreciseVersionNotFound) as e:
        aos_version._check_precise_version_found(pkgs, expected_pkgs)
    assert list(expect_not_found.values()) == e.value.problem_pkgs
@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
    (
        # nothing installed at all
        [],
        expected_pkgs,
    ),
    (
        # more precise but not strictly higher
        [Package('spam', '3.2.1.9')],
        expected_pkgs,
    ),
    (
        # matches the highest of the allowed versions exactly
        [Package('ovs', '2.7')],
        {
            "ovs": {
                "name": "ovs",
                "version": ["2.6", "2.7"],
                "check_multi": False,
            }
        },
    ),
])
def test_check_higher_version_found(pkgs, expected_pkgs_dict):
    """Positive cases: no installed package exceeds its expected version,
    so the check must return without raising."""
    aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
@pytest.mark.parametrize('pkgs,expected_pkgs_dict,expect_higher', [
    (
        [Package('spam', '3.3')],
        expected_pkgs,
        ['spam-3.3'], # lower precision, but higher
    ),
    (
        [Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
        expected_pkgs,
        ['eggs-3.3.2'], # one too high
    ),
    (
        [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
        expected_pkgs,
        ['eggs-3.4'], # multiple versions, one is higher
    ),
    (
        [Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
        expected_pkgs,
        ['eggs-3.4'], # multiple versions, two are higher
    ),
    (
        # higher than every allowed version in the list
        [Package('ovs', '2.8')],
        {
            "ovs": {
                "name": "ovs",
                "version": ["2.6", "2.7"],
                "check_multi": False,
            }
        },
        ['ovs-2.8'],
    ),
])
def test_check_higher_version_found_fail(pkgs, expected_pkgs_dict, expect_higher):
    """Negative cases: FoundHigherVersion must be raised, flagging exactly
    the name-version strings listed in expect_higher (order-insensitive)."""
    with pytest.raises(aos_version.FoundHigherVersion) as e:
        aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
    assert set(expect_higher) == set(e.value.problem_pkgs)
@pytest.mark.parametrize('pkgs', [
    [],
    [Package('spam', '3.2.1')],
    [Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
])
def test_check_multi_minor_release(pkgs):
    """Positive cases: at most one minor release per package, so the check
    must return without raising."""
    aos_version._check_multi_minor_release(pkgs, expected_pkgs)
@pytest.mark.parametrize('pkgs,expect_to_flag_pkgs', [
    (
        [Package('spam', '3.2.1'), Package('spam', '3.3.2')],
        ['spam'],
    ),
    (
        [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
        ['eggs'],
    ),
])
def test_check_multi_minor_release_fail(pkgs, expect_to_flag_pkgs):
    """Negative cases: packages installed at multiple minor releases must
    raise FoundMultiRelease, flagging exactly the offending package names."""
    with pytest.raises(aos_version.FoundMultiRelease) as e:
        aos_version._check_multi_minor_release(pkgs, expected_pkgs)
    assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
| [
"collections.namedtuple",
"aos_version._check_multi_minor_release",
"aos_version._check_precise_version_found",
"aos_version._check_higher_version_found",
"pytest.raises"
] | [((79, 121), 'collections.namedtuple', 'namedtuple', (['"""Package"""', "['name', 'version']"], {}), "('Package', ['name', 'version'])\n", (89, 121), False, 'from collections import namedtuple\n'), ((1188, 1254), 'aos_version._check_precise_version_found', 'aos_version._check_precise_version_found', (['pkgs', 'expected_pkgs_dict'], {}), '(pkgs, expected_pkgs_dict)\n', (1228, 1254), False, 'import aos_version\n'), ((3209, 3274), 'aos_version._check_higher_version_found', 'aos_version._check_higher_version_found', (['pkgs', 'expected_pkgs_dict'], {}), '(pkgs, expected_pkgs_dict)\n', (3248, 3274), False, 'import aos_version\n'), ((4666, 4725), 'aos_version._check_multi_minor_release', 'aos_version._check_multi_minor_release', (['pkgs', 'expected_pkgs'], {}), '(pkgs, expected_pkgs)\n', (4704, 4725), False, 'import aos_version\n'), ((2507, 2556), 'pytest.raises', 'pytest.raises', (['aos_version.PreciseVersionNotFound'], {}), '(aos_version.PreciseVersionNotFound)\n', (2520, 2556), False, 'import pytest\n'), ((2571, 2632), 'aos_version._check_precise_version_found', 'aos_version._check_precise_version_found', (['pkgs', 'expected_pkgs'], {}), '(pkgs, expected_pkgs)\n', (2611, 2632), False, 'import aos_version\n'), ((4297, 4342), 'pytest.raises', 'pytest.raises', (['aos_version.FoundHigherVersion'], {}), '(aos_version.FoundHigherVersion)\n', (4310, 4342), False, 'import pytest\n'), ((4357, 4422), 'aos_version._check_higher_version_found', 'aos_version._check_higher_version_found', (['pkgs', 'expected_pkgs_dict'], {}), '(pkgs, expected_pkgs_dict)\n', (4396, 4422), False, 'import aos_version\n'), ((5075, 5119), 'pytest.raises', 'pytest.raises', (['aos_version.FoundMultiRelease'], {}), '(aos_version.FoundMultiRelease)\n', (5088, 5119), False, 'import pytest\n'), ((5134, 5193), 'aos_version._check_multi_minor_release', 'aos_version._check_multi_minor_release', (['pkgs', 'expected_pkgs'], {}), '(pkgs, expected_pkgs)\n', (5172, 5193), False, 'import aos_version\n')] |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import file_verifier
import process_verifier
import registry_verifier
class VerifierRunner:
  """Runs all Verifiers against a property dictionary."""

  def __init__(self):
    """Constructor: registers the known verifiers keyed by property name."""
    # TODO(sukolsak): Implement other verifiers
    self._verifiers = {
        'Files': file_verifier.FileVerifier(),
        'Processes': process_verifier.ProcessVerifier(),
        'RegistryEntries': registry_verifier.RegistryVerifier(),
    }

  def VerifyAll(self, property, variable_expander):
    """Verifies that the current machine states match the property dictionary.

    A property dictionary is a dictionary where each key is a verifier's name
    and the associated value is the input to that verifier. For details about
    the input format for each verifier, take a look at http://goo.gl/1P85WL

    Args:
      property: A property dictionary.  (Name shadows the `property` builtin;
          kept for interface compatibility.)
      variable_expander: A VariableExpander object.

    Raises:
      KeyError: If the dictionary names a verifier that is not registered.
    """
    # items() instead of the Python-2-only iteritems() so this module also
    # runs under Python 3; iteration semantics are unchanged.
    for verifier_name, verifier_input in property.items():
      if verifier_name not in self._verifiers:
        raise KeyError('Unknown verifier %s' % verifier_name)
      self._verifiers[verifier_name].VerifyInput(verifier_input,
                                                 variable_expander)
| [
"registry_verifier.RegistryVerifier",
"process_verifier.ProcessVerifier",
"file_verifier.FileVerifier"
] | [((418, 446), 'file_verifier.FileVerifier', 'file_verifier.FileVerifier', ([], {}), '()\n', (444, 446), False, 'import file_verifier\n'), ((467, 501), 'process_verifier.ProcessVerifier', 'process_verifier.ProcessVerifier', ([], {}), '()\n', (499, 501), False, 'import process_verifier\n'), ((528, 564), 'registry_verifier.RegistryVerifier', 'registry_verifier.RegistryVerifier', ([], {}), '()\n', (562, 564), False, 'import registry_verifier\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
import sys
import requests
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf8')
# CSS classes of the word-of-the-day fields scraped from each article;
# this list also fixes the column order of each row written to the CSV.
target_classes = [
    'wotd-word',
    'wotd-pronunciation',
    'wotd-function',
    'wotd-passage',
    'wotd-definition',
    'wotd-synonyms',
    'wotd-source',
    'wotd-published'
]
def fetch_data(url):
    """GET *url* and return the page parsed as a BeautifulSoup document.

    Raises requests.HTTPError (via raise_for_status) for error responses.
    """
    resp = requests.get(url)
    # raise_for_status() is already a no-op for successful responses, so the
    # original `if not resp.ok` guard around it was redundant.
    resp.raise_for_status()
    return BeautifulSoup(resp.content, 'html.parser')
def update_urls():
    """Return absolute URLs of every word-of-the-day page linked from the index."""
    index = fetch_data('https://gre.economist.com/gre-vocabulary')
    anchors = index.find_all('a', class_=['wotd-view-link', 'wotd-teaser-stub'])
    return ["https://gre.economist.com%s" % anchor['href'] for anchor in anchors]
def parse_data(soup):
    """Extract the wotd-* fields from a word page into a {class: text} dict."""
    article = soup.find('article')
    result = {}
    for tag in article.find_all(class_=target_classes):
        if not tag.has_attr('class'):
            continue
        key = tag.attrs['class'][0]
        if key == 'wotd-source':
            # reserve href for source
            value = str(tag.contents[1])
        else:
            value = tag.text
        result[key] = value
    return result
def write_csv(output_f):
    """Scrape every word page and append one CSV row per word to *output_f*.

    Columns follow the order of ``target_classes``; missing fields are left
    empty (None) and present ones are whitespace-stripped.
    """
    with open(output_f, 'w') as out_file:
        writer = csv.writer(out_file)
        for url in update_urls():
            print("Working on %s" % url)
            word_info = parse_data(fetch_data(url))
            raw_row = [word_info.get(key) for key in target_classes]
            cleaned = [cell.strip() if cell is not None else cell for cell in raw_row]
            writer.writerow(cleaned)
def main():
    """Entry point: write the vocabulary CSV to sys.argv[1] or a default path."""
    try:
        output_filename = sys.argv[1]
    except IndexError:
        # NOTE(review): the fallback writes CSV data to a ".py" file name —
        # presumably this was meant to be ".csv"; confirm before changing.
        output_filename = './economist_gre_vocab.py'
        print("Save to ./economist_gre_vocab.py")
    write_csv(output_filename)


if __name__ == '__main__':
    main()
| [
"bs4.BeautifulSoup",
"csv.writer",
"sys.setdefaultencoding",
"requests.get"
] | [((124, 154), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf8"""'], {}), "('utf8')\n", (146, 154), False, 'import sys\n'), ((379, 396), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (391, 396), False, 'import requests\n'), ((460, 502), 'bs4.BeautifulSoup', 'BeautifulSoup', (['resp.content', '"""html.parser"""'], {}), "(resp.content, 'html.parser')\n", (473, 502), False, 'from bs4 import BeautifulSoup\n'), ((1249, 1262), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1259, 1262), False, 'import csv\n')] |
import torch
import torch.nn.functional as F
__all__ = ['kl_loss', 'huber_loss']
def kl_loss(x, y):
    """Mean KL divergence KL(p || q) per sample along dim 1.

    ``p`` is softmax of the detached ``x`` (treated as the target
    distribution, no gradient), ``q`` is softmax of ``y`` (gradients flow
    through ``y`` only).
    """
    p = F.softmax(x.detach(), dim=1)
    log_q = F.log_softmax(y, dim=1)
    per_sample = torch.sum(p * (torch.log(p) - log_q), dim=1)
    return torch.mean(per_sample)
def huber_loss(error, delta):
    """Mean Huber loss of ``error`` with transition point ``delta``.

    Quadratic (0.5 * e^2) inside |e| <= delta, linear outside.
    """
    magnitude = torch.abs(error)
    # Clip the quadratic region at delta; the remainder is penalised linearly.
    clipped = torch.min(magnitude, torch.full_like(magnitude, fill_value=delta))
    per_element = 0.5 * clipped ** 2 + delta * (magnitude - clipped)
    return torch.mean(per_element)
| [
"torch.abs",
"torch.log",
"torch.mean",
"torch.full_like",
"torch.nn.functional.log_softmax"
] | [((148, 171), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['y'], {'dim': '(1)'}), '(y, dim=1)\n', (161, 171), True, 'import torch.nn.functional as F\n'), ((284, 300), 'torch.abs', 'torch.abs', (['error'], {}), '(error)\n', (293, 300), False, 'import torch\n'), ((465, 483), 'torch.mean', 'torch.mean', (['losses'], {}), '(losses)\n', (475, 483), False, 'import torch\n'), ((338, 382), 'torch.full_like', 'torch.full_like', (['abs_error'], {'fill_value': 'delta'}), '(abs_error, fill_value=delta)\n', (353, 382), False, 'import torch\n'), ((209, 221), 'torch.log', 'torch.log', (['x'], {}), '(x)\n', (218, 221), False, 'import torch\n')] |
import unittest
from app.main.loc_types import Point, PointWithDistance
from app.main.geohash import encode
class TestPoint(unittest.TestCase):
    """Round-trip serialisation tests for Point."""

    def test_to_json(self):
        lat, lon = 50.45466, 30.5238
        expected = {
            "point_id": "id1",
            "latitude": lat,
            "longitude": lon,
            "geohash": encode(lat, lon),
        }
        self.assertEqual(Point("id1", lat, lon).to_json(), expected)

    def test_from_json(self):
        lat, lon = 50.45466, 30.5238
        payload = {
            "point_id": "id",
            "latitude": lat,
            "longitude": lon,
            "geohash": str(encode(lat, lon)),
        }
        self.assertEqual(Point.from_json(payload), Point("id", lat, lon))
class TestPointWithDistance(unittest.TestCase):
    """Serialisation tests for PointWithDistance."""

    def test_to_json(self):
        lat, lon = 50.45466, 30.5238
        wrapped = PointWithDistance(Point("id1", lat, lon), 10)
        expected = {
            "point_id": "id1",
            "latitude": lat,
            "longitude": lon,
            "geohash": encode(lat, lon),
            "distance": 10,
        }
        self.assertEqual(wrapped.to_json(), expected)


if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"app.main.loc_types.Point",
"app.main.loc_types.Point.from_json",
"app.main.geohash.encode"
] | [((1118, 1133), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1131, 1133), False, 'import unittest\n'), ((190, 221), 'app.main.loc_types.Point', 'Point', (['"""id1"""', '(50.45466)', '(30.5238)'], {}), "('id1', 50.45466, 30.5238)\n", (195, 221), False, 'from app.main.loc_types import Point, PointWithDistance\n'), ((588, 618), 'app.main.loc_types.Point', 'Point', (['"""id"""', '(50.45466)', '(30.5238)'], {}), "('id', 50.45466, 30.5238)\n", (593, 618), False, 'from app.main.loc_types import Point, PointWithDistance\n'), ((329, 354), 'app.main.geohash.encode', 'encode', (['(50.45466)', '(30.5238)'], {}), '(50.45466, 30.5238)\n', (335, 354), False, 'from app.main.geohash import encode\n'), ((644, 665), 'app.main.loc_types.Point.from_json', 'Point.from_json', (['json'], {}), '(json)\n', (659, 665), False, 'from app.main.loc_types import Point, PointWithDistance\n'), ((789, 820), 'app.main.loc_types.Point', 'Point', (['"""id1"""', '(50.45466)', '(30.5238)'], {}), "('id1', 50.45466, 30.5238)\n", (794, 820), False, 'from app.main.loc_types import Point, PointWithDistance\n'), ((969, 994), 'app.main.geohash.encode', 'encode', (['(50.45466)', '(30.5238)'], {}), '(50.45466, 30.5238)\n', (975, 994), False, 'from app.main.geohash import encode\n'), ((541, 566), 'app.main.geohash.encode', 'encode', (['(50.45466)', '(30.5238)'], {}), '(50.45466, 30.5238)\n', (547, 566), False, 'from app.main.geohash import encode\n')] |
"""
3D Agn spin visualisation
"""
import logging
import os
import shutil
import corner
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyvista as pv
import scipy.stats
from bbh_simulator.calculate_kick_vel_from_samples import Samples
from matplotlib import rc
from tqdm import tqdm
# Silence chatty loggers (the bbh_simulator package and the root logger).
logging.getLogger("bbh_simulator").setLevel(logging.ERROR)
logging.getLogger().setLevel(logging.ERROR)
# Render matplotlib text with LaTeX (required for the labels below).
rc("text", usetex=True)

# Keys used for the interactive plot sliders / generated samples.
N_VEC = "Num BBH"
COS_theta_12 = "cos(theta_12)"
COS_theta_1L = "cos(theta_1L)"
BILBY_BLUE_COLOR = "#0072C1"
VIOLET_COLOR = "#8E44AD"
# Per-parameter corner-plot settings: l = axis label, r = plot range.
PARAMS = dict(
    chi_eff=dict(l=r"$\chi_{eff}$", r=(-1, 1)),
    chi_p=dict(l=r"$\chi_{p}$", r=(0, 1)),
    cos_tilt_1=dict(l=r"$\cos(t1)$", r=(-1, 1)),
    cos_tilt_2=dict(l=r"$\cos(t2)$", r=(-1, 1)),
    cos_theta_12=dict(l=r"$\cos \theta_{12}$", r=(-1, 1)),
    cos_theta_1L=dict(l=r"$\cos \theta_{1L}$", r=(-1, 1)),
)
def rotate_vector_along_z(v1, theta):
    """Rotate 3-vector ``v1`` by angle ``theta`` about the z axis.

    |cos t  -sin t  0| |x|   |x cos t - y sin t|
    |sin t   cos t  0| |y| = |x sin t + y cos t|
    |  0       0    1| |z|   |        z        |
    """
    x, y, z = v1[0], v1[1], v1[2]
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    return [x * cos_t - y * sin_t, x * sin_t + y * cos_t, z]
def rotate_vector_along_y(v1, theta):
    """Rotate 3-vector ``v1`` by angle ``theta`` about the y axis.

    | cos t  0  sin t| |x|   | x cos t + z sin t|
    |   0    1    0  | |y| = |        y         |
    |-sin t  0  cos t| |z|   |-x sin t + z cos t|
    """
    x, y, z = v1[0], v1[1], v1[2]
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    return [x * cos_t + z * sin_t, y, -x * sin_t + z * cos_t]
def get_isotropic_vector(std=1):
    """Draw a random 3D unit vector with uniform azimuth and truncated-normal
    cos(zenith).

    The zenith angle is theta = arccos(c) where c is drawn from a normal
    distribution with mean 1 and scale ``std``, truncated to [-1, 1]
    (std -> 0 concentrates vectors toward the +z pole; large std approaches
    isotropy in cos(theta)). The azimuth phi is uniform on [0, 2*pi).
    Algo from http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution

    BUGFIX: the original drew the azimuth but then overwrote the same
    variable with the zenith angle, so x and y used the zenith as azimuth
    and the azimuthal distribution collapsed onto the zenith angle. A
    separate ``phi`` is now kept for the azimuth.

    :param std: scale of the truncated normal over cos(zenith); 0 is
        replaced by a tiny value to avoid a degenerate distribution.
    :return: list [x, y, z] of unit length.
    """
    phi = np.random.uniform(0, np.pi * 2)  # azimuth, uniform on the circle
    mean = 1
    clip_a, clip_b = -1, 1
    if std == 0:
        std = 0.00001
    a, b = (clip_a - mean) / std, (clip_b - mean) / std
    costheta = scipy.stats.truncnorm.rvs(
        a=a, b=b, loc=mean, scale=std, size=1
    )[0]
    theta = np.arccos(costheta)
    x = np.sin(theta) * np.cos(phi)
    y = np.sin(theta) * np.sin(phi)
    z = np.cos(theta)
    return [x, y, z]
def rotate_v2_to_v1(v1, v2):
    """Rotate ``v2`` by the zenith (about y) then azimuth (about z) of ``v1``."""
    zenith = get_zenith_angle(v1[2])
    azimuth = get_azimuth_angle(v1[0], v1[1])
    tilted = rotate_vector_along_y(v2, zenith)
    return rotate_vector_along_z(tilted, azimuth)
def compute_vectors(mesh):
    """Unit direction vectors from the origin (0) to each mesh point."""
    directions = mesh.points - 0
    return normalise_vectors(directions)
def normalise_vectors(vectors):
    """Scale each row of ``vectors`` (N x 3) to unit length."""
    row_norms = np.linalg.norm(vectors, axis=1)
    return vectors / row_norms[:, None]
class SphereAngleAnimation:
    """Interactive pyvista scene showing S1/S2 spin vectors on a unit sphere.

    Sliders control the minimum cos(theta_1L), minimum cos(theta_12) and
    the number of BBH vectors drawn; moving a slider regenerates the
    vectors. NOTE(review): creating an instance opens a GUI window
    (``plotter.show``) — this class has no headless mode.
    """

    def __init__(self):
        # default parameters
        self.kwargs = {
            "radius": 1,
            N_VEC: 100,
            COS_theta_1L: 1,
            COS_theta_12: 1,
        }
        self.s1_color = "lightblue"
        self.s2_color = "lightgreen"
        self.plotter = self.init_plotter()
        self.add_sliders()
        # Opens the interactive window titled "AGN BBH spins".
        self.plotter.show("AGN BBH spins")
        self.add_vectors()

    def __call__(self, param, value):
        # Slider callback: store the new value and redraw the vectors.
        self.kwargs[param] = value
        self.update()

    def add_sliders(self):
        """Attach the three parameter sliders along the bottom of the window."""
        LEFT = dict(
            pointa=(0.025, 0.1),
            pointb=(0.31, 0.1),
        )
        MIDDLE = dict(pointa=(0.35, 0.1), pointb=(0.64, 0.1))
        RIGHT = dict(
            pointa=(0.67, 0.1),
            pointb=(0.98, 0.1),
        )
        self.plotter.add_slider_widget(
            callback=lambda value: self(COS_theta_1L, value),
            rng=[0, 1],
            value=1,
            title=f"min {COS_theta_1L}",
            style="modern",
            **LEFT,
        )
        self.plotter.add_slider_widget(
            callback=lambda value: self(COS_theta_12, value),
            rng=[0, 1],
            value=1,
            title=f"min {COS_theta_12}",
            style="modern",
            **MIDDLE,
        )
        self.plotter.add_slider_widget(
            callback=lambda value: self(N_VEC, int(value)),
            rng=[1, 1000],
            value=100,
            title=N_VEC,
            style="modern",
            **RIGHT,
        )

    def init_plotter(self):
        """Build the plotter with the sphere, the x/y/z axis arrows and legend."""
        p = pv.Plotter()
        p.add_mesh(pv.Sphere(radius=self.kwargs["radius"]))
        ar_kwgs = dict(
            scale=self.kwargs["radius"] * 2,
            shaft_radius=0.01,
            tip_radius=0.05,
            tip_length=0.1,
        )
        p.add_mesh(pv.Arrow(direction=[1, 0, 0], **ar_kwgs), color="blue")  # x
        p.add_mesh(pv.Arrow(direction=[0, 1, 0], **ar_kwgs), color="red")  # y
        p.add_mesh(
            pv.Arrow(direction=[0, 0, 1], **ar_kwgs), color="green"
        )  # Z
        p.add_legend(
            labels=[
                ["L", "green"],
                ["S1", self.s1_color],
                ["S2", self.s2_color],
            ]
        )
        return p

    def add_vectors(self):
        """Draw fresh S1 and S2 vector sets from the current slider values.

        S2 vectors are generated relative to z and then rotated into each
        S1 vector's frame via rotate_v2_to_v1.
        """
        s1_vectors = [
            get_isotropic_vector(self.kwargs[COS_theta_1L])
            for _ in range(self.kwargs[N_VEC])
        ]
        s2_vectors = [
            get_isotropic_vector(self.kwargs[COS_theta_12])
            for _ in range(self.kwargs[N_VEC])
        ]
        s2_vectors = [
            rotate_v2_to_v1(s1, s2) for s1, s2 in zip(s1_vectors, s2_vectors)
        ]
        self.add_vector_list(s1_vectors, name="s1", color=self.s1_color)
        self.add_vector_list(s2_vectors, name="s2", color=self.s2_color)

    def add_vector_list(self, vectors, name, color):
        """Replace the named point-cloud + arrow actors with *vectors*."""
        self.plotter.remove_actor(f"{name}_pts")
        self.plotter.remove_actor(f"{name}_arrows")
        pt_cloud = pv.PolyData(vectors)
        vectors = compute_vectors(pt_cloud)
        pt_cloud["vectors"] = vectors
        arrows = pt_cloud.glyph(
            orient="vectors",
            scale=False,
            factor=0.3,
        )
        self.plotter.add_mesh(
            pt_cloud,
            color=color,
            point_size=10,
            render_points_as_spheres=True,
            name=f"{name}_pts",
        )
        self.plotter.add_mesh(arrows, color=color, name=f"{name}_arrows")

    def update(self):
        self.add_vectors()
def get_zenith_angle(z):
    """Angle between the z axis and a unit vector with z-component ``z``, in [0, pi)."""
    angle = np.arccos(z)
    return angle
def get_azimuth_angle(x, y):
    """Angle between the north vector and the projection onto the horizontal
    plane, wrapped into [0, 2*pi)."""
    angle = np.arctan2(y, x)  # [-pi, pi)
    return angle + 2 * np.pi if angle < 0.0 else angle
def get_chi_eff(s1, s2, q=1):
    """Effective aligned spin chi_eff = (s1z + q * s2z) / (1 + q).

    BUGFIX: the original returned (s1z * s2z) * q / (1 + q) — a product of
    the two aligned components — instead of the standard mass-weighted
    average of the aligned spin components (q = m2/m1).

    :param s1: spin vector of body 1 (only the z-component is used).
    :param s2: spin vector of body 2 (only the z-component is used).
    :param q: mass ratio m2/m1, default 1 (equal masses).
    """
    s1z, s2z = s1[2], s2[2]
    return (s1z + q * s2z) / (1 + q)
def get_chi_p(s1, s2, q=1):
    """Precessing spin parameter: max of the in-plane spin magnitudes, with
    the secondary's weighted by q(4q + 3)/(4 + 3q)."""
    in_plane_1 = np.sqrt(s1[0] ** 2 + s1[1] ** 2)
    in_plane_2 = np.sqrt(s2[0] ** 2 + s2[1] ** 2)
    weight = q * ((4 * q) + 3) / (4 + (3 * q))
    return np.maximum(in_plane_1, in_plane_2 * weight)
# Number of BBH samples generated per hyper-parameter setting.
N = 1000


def convert_vectors_to_bbh_param(cos_theta1L_std, cos_theta12_std):
    """Generate BBH spin vectors and convert to LIGO BBH params.

    S1 is drawn relative to L (z-axis); S2 is drawn relative to z and then
    rotated into each S1's frame, so cos_theta_12 measures the S1-S2 angle.
    Returns the posterior DataFrame of a bbh_simulator Samples object.

    cos_tilt_i:
        Cosine of the zenith angle between the s and j [-1,1]
    theta_12:
        diff bw azimuthal angles of the s1hat+s2 projections on orbital plane [0, 2pi]
    theta_jl:
        diff bw L and J azimuthal angles [0, 2pi]
    """
    n = N
    # L is fixed along +z for every sample.
    lhat = normalise_vectors([[0, 0, 1] for _ in range(n)])
    s1hat = normalise_vectors(
        [get_isotropic_vector(cos_theta1L_std) for _ in range(n)]
    )
    s2hat = normalise_vectors(
        [get_isotropic_vector(cos_theta12_std) for _ in range(n)]
    )
    # Rotate each S2 from the z-frame into its S1's frame.
    s2hat = normalise_vectors(
        [rotate_v2_to_v1(s1v, s2v) for s1v, s2v in zip(s1hat, s2hat)]
    )
    df = pd.DataFrame(
        dict(
            spin_1x=s1hat[:, 0],
            spin_1y=s1hat[:, 1],
            spin_1z=s1hat[:, 2],
            spin_2x=s2hat[:, 0],
            spin_2y=s2hat[:, 1],
            spin_2z=s2hat[:, 2],
            cos_tilt_1=np.cos([get_zenith_angle(v[2]) for v in s1hat]),
            cos_tilt_2=np.cos([get_zenith_angle(v[2]) for v in s2hat]),
            chi_eff=[get_chi_eff(s1, s2) for s1, s2 in zip(s1hat, s2hat)],
            chi_p=[get_chi_p(s1, s2) for s1, s2 in zip(s1hat, s2hat)],
            cos_theta_12=[
                np.cos(get_angle_bw_vectors(s1, s2))
                for s1, s2 in zip(s1hat, s2hat)
            ],
            cos_theta_1L=[
                np.cos(get_angle_bw_vectors(s1, l))
                for s1, l in zip(s1hat, lhat)
            ],
            # Fixed fiducial source-frame masses (equal-mass 25+25 system).
            mass_1_source=[25 for _ in s1hat],
            mass_2_source=[25 for _ in s1hat],
        )
    )
    s = Samples(posterior=df)
    # s.calculate_remnant_kick_velocity()
    return s.posterior
def get_angle_bw_vectors(v1, v2):
    """Angle in radians between vectors ``v1`` and ``v2`` (any magnitudes)."""
    u1 = v1 / np.linalg.norm(v1)
    u2 = v2 / np.linalg.norm(v2)
    return np.arccos(np.dot(u1, u2))
def plot_corner_of_spins(cos_theta1L_std, cos_theta12_std, save=True):
    """Corner plot of the spin-derived parameters for one hyper-parameter pair.

    :param cos_theta1L_std: scale of the cos(theta_1L) truncated normal.
    :param cos_theta12_std: scale of the cos(theta_12) truncated normal.
    :param save: if True, save the figure to a png named after both stds.
    """
    bbh_vectors = convert_vectors_to_bbh_param(
        cos_theta1L_std=cos_theta1L_std, cos_theta12_std=cos_theta12_std
    )
    params = [p for p in PARAMS.keys()]
    bbh_vectors = bbh_vectors[params]
    labels = [PARAMS[p]["l"] for p in params]
    range = [PARAMS[p]["r"] for p in params]  # NOTE: shadows builtin range
    # NOTE(review): CORNER_KWARGS is not defined anywhere in this file —
    # presumably imported/defined elsewhere; confirm it is in scope.
    corner.corner(bbh_vectors, **CORNER_KWARGS, labels=labels, range=range)
    if save:
        plt.savefig(
            f"spins_theta1L{cos_theta1L_std:.2f}_theta12{cos_theta12_std:.2f}.png"
        )
def get_normalisation_weight(len_current_samples, len_of_longest_samples):
    """Per-sample weights that scale a set to the size of the longest set."""
    ratio = len_of_longest_samples / len_current_samples
    return np.ones(len_current_samples) * ratio
def plot_overlaid_corners(cos_theta1L_std_vals, cos_theta12_std_vals, pltdir):
    """Overlay corner plots of adjusted-spin posteriors on an isotropic base.

    For each (cos_theta1L_std, cos_theta12_std) pair, draws the adjusted
    population (weighted to the base sample size) over the isotropic
    reference and saves one png + csv per setting under ``pltdir``.
    WARNING: ``pltdir`` is deleted and recreated on every call.
    """
    params = dict(
        chi_eff=dict(l=r"$\chi_{eff}$", r=(-1, 1)),
        chi_p=dict(l=r"$\chi_{p}$", r=(-1, 1)),
        cos_tilt_1=dict(l=r"$\cos(t1)$", r=(-1, 1)),
        cos_theta_12=dict(l=r"$\cos \theta_{12}$", r=(-1, 1)),
        remnant_kick_mag=dict(l=r"$|\vec{v}_k|\ $km/s", r=(0, 3000)),
    )
    # Isotropic reference population (std = 1 for both angles).
    base = convert_vectors_to_bbh_param(cos_theta1L_std=1, cos_theta12_std=1)
    labels = [params[p]["l"] for p in params]
    range = [params[p]["r"] for p in params]  # NOTE: shadows builtin range
    # NOTE(review): CORNER_KWARGS is not defined in this file — confirm scope.
    kwargs = dict(**CORNER_KWARGS, labels=labels, range=range)
    if os.path.isdir(pltdir):
        shutil.rmtree(pltdir)
    os.makedirs(pltdir, exist_ok=False)
    i = 0
    for min_cos_theta1L, min_cos_theta12 in tqdm(
        zip(cos_theta1L_std_vals, cos_theta12_std_vals),
        total=len(cos_theta1L_std_vals),
        desc="Hyper-Param settings",
    ):
        f = f"{pltdir}/{i:02}_p12{min_cos_theta12:.1f}_p1L{min_cos_theta1L:.1f}.png"
        compare = convert_vectors_to_bbh_param(
            cos_theta1L_std=min_cos_theta1L, cos_theta12_std=min_cos_theta12
        )
        compare.to_csv(f.replace(".png", ".csv"))
        fig = corner.corner(base[params], **kwargs, color=BILBY_BLUE_COLOR)
        # Reweight the comparison set so both populations have equal mass.
        normalising_weights = get_normalisation_weight(
            len(compare), max(len(compare), len(base))
        )
        corner.corner(
            compare[params],
            fig=fig,
            weights=normalising_weights,
            **kwargs,
            color=VIOLET_COLOR,
        )
        # Invisible proxy artists used only for the legend entries.
        orig_line = mlines.Line2D(
            [], [], color=BILBY_BLUE_COLOR, label="Isotropic Spins"
        )
        weighted_line = mlines.Line2D(
            [],
            [],
            color=VIOLET_COLOR,
            label=f"Adjusted spins $\sigma \cos(12)={min_cos_theta12:.1f}, \sigma \cos(1L)={min_cos_theta1L:.1f}$",
        )
        plt.legend(
            handles=[orig_line, weighted_line],
            fontsize=25,
            frameon=False,
            bbox_to_anchor=(1, len(labels)),
            loc="upper right",
        )
        plt.savefig(f)
        plt.close()
        i += 1
import glob
from bilby_report.tools import image_utils
def save_gif(gifname, outdir="gif", loop=False):
    """Assemble the PNGs under ``outdir`` into a GIF named ``gifname``.

    With ``loop=True`` the reversed frame sequence is appended, producing
    a ping-pong animation.
    """
    frames = glob.glob(f"{outdir}/*.png")
    gif_filename = os.path.join(outdir, gifname)
    n_original = len(frames)
    frames.sort()
    if loop:
        frames += frames[::-1]
    assert n_original <= len(frames)
    image_utils.make_gif(
        image_paths=frames, duration=50, gif_save_path=gif_filename
    )
    print(f"Saved gif {gif_filename}")
if __name__ == "__main__":
    # Launch the interactive 3D visualisation (blocks until the window closes).
    r = SphereAngleAnimation()
    # The batch corner-plot/GIF pipeline below is currently disabled.
    # varying = list(np.arange(0, 2.1, 0.5))
    # constant = [1 for i in range(len(varying))]
    #
    # outdir = "../output/vary_12"
    # plot_overlaid_corners(cos_theta1L_std_vals=constant,
    #                       cos_theta12_std_vals=varying, pltdir=outdir)
    # save_gif("vary_12.gif", outdir=outdir, loop=True)
    #
    # outdir = "../output/vary_1L"
    # plot_overlaid_corners(cos_theta1L_std_vals=varying,
    #                       cos_theta12_std_vals=constant, pltdir=outdir)
    # save_gif("vary_1L.gif", outdir=outdir, loop=True)
| [
"logging.getLogger",
"numpy.arccos",
"numpy.sqrt",
"bilby_report.tools.image_utils.make_gif",
"numpy.arctan2",
"matplotlib.rc",
"numpy.linalg.norm",
"numpy.sin",
"corner.corner",
"matplotlib.lines.Line2D",
"pyvista.Arrow",
"bbh_simulator.calculate_kick_vel_from_samples.Samples",
"pyvista.Pol... | [((452, 475), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (454, 475), False, 'from matplotlib import rc\n'), ((2082, 2113), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(np.pi * 2)'], {}), '(0, np.pi * 2)\n', (2099, 2113), True, 'import numpy as np\n'), ((2492, 2511), 'numpy.arccos', 'np.arccos', (['costheta'], {}), '(costheta)\n', (2501, 2511), True, 'import numpy as np\n'), ((2596, 2609), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2602, 2609), True, 'import numpy as np\n'), ((6720, 6732), 'numpy.arccos', 'np.arccos', (['z'], {}), '(z)\n', (6729, 6732), True, 'import numpy as np\n'), ((6864, 6880), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (6874, 6880), True, 'import numpy as np\n'), ((7105, 7137), 'numpy.sqrt', 'np.sqrt', (['(s1[0] ** 2 + s1[1] ** 2)'], {}), '(s1[0] ** 2 + s1[1] ** 2)\n', (7112, 7137), True, 'import numpy as np\n'), ((7150, 7182), 'numpy.sqrt', 'np.sqrt', (['(s2[0] ** 2 + s2[1] ** 2)'], {}), '(s2[0] ** 2 + s2[1] ** 2)\n', (7157, 7182), True, 'import numpy as np\n'), ((7242, 7276), 'numpy.maximum', 'np.maximum', (['chi1p', '(chi2p * qfactor)'], {}), '(chi1p, chi2p * qfactor)\n', (7252, 7276), True, 'import numpy as np\n'), ((8983, 9004), 'bbh_simulator.calculate_kick_vel_from_samples.Samples', 'Samples', ([], {'posterior': 'df'}), '(posterior=df)\n', (8990, 9004), False, 'from bbh_simulator.calculate_kick_vel_from_samples import Samples\n'), ((9211, 9245), 'numpy.dot', 'np.dot', (['unit_vector1', 'unit_vector2'], {}), '(unit_vector1, unit_vector2)\n', (9217, 9245), True, 'import numpy as np\n'), ((9257, 9279), 'numpy.arccos', 'np.arccos', (['dot_product'], {}), '(dot_product)\n', (9266, 9279), True, 'import numpy as np\n'), ((9653, 9724), 'corner.corner', 'corner.corner', (['bbh_vectors'], {'labels': 'labels', 'range': 'range'}), '(bbh_vectors, **CORNER_KWARGS, labels=labels, range=range)\n', (9666, 9724), False, 'import corner\n'), ((10665, 
10686), 'os.path.isdir', 'os.path.isdir', (['pltdir'], {}), '(pltdir)\n', (10678, 10686), False, 'import os\n'), ((10722, 10757), 'os.makedirs', 'os.makedirs', (['pltdir'], {'exist_ok': '(False)'}), '(pltdir, exist_ok=False)\n', (10733, 10757), False, 'import os\n'), ((12340, 12368), 'glob.glob', 'glob.glob', (['f"""{outdir}/*.png"""'], {}), "(f'{outdir}/*.png')\n", (12349, 12368), False, 'import glob\n'), ((12388, 12417), 'os.path.join', 'os.path.join', (['outdir', 'gifname'], {}), '(outdir, gifname)\n', (12400, 12417), False, 'import os\n'), ((12571, 12662), 'bilby_report.tools.image_utils.make_gif', 'image_utils.make_gif', ([], {'image_paths': 'image_paths', 'duration': '(50)', 'gif_save_path': 'gif_filename'}), '(image_paths=image_paths, duration=50, gif_save_path=\n gif_filename)\n', (12591, 12662), False, 'from bilby_report.tools import image_utils\n'), ((348, 382), 'logging.getLogger', 'logging.getLogger', (['"""bbh_simulator"""'], {}), "('bbh_simulator')\n", (365, 382), False, 'import logging\n'), ((407, 426), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (424, 426), False, 'import logging\n'), ((2520, 2533), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2526, 2533), True, 'import numpy as np\n'), ((2536, 2549), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2542, 2549), True, 'import numpy as np\n'), ((2558, 2571), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2564, 2571), True, 'import numpy as np\n'), ((2574, 2587), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2580, 2587), True, 'import numpy as np\n'), ((4666, 4678), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (4676, 4678), True, 'import pyvista as pv\n'), ((6102, 6122), 'pyvista.PolyData', 'pv.PolyData', (['vectors'], {}), '(vectors)\n', (6113, 6122), True, 'import pyvista as pv\n'), ((9131, 9149), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (9145, 9149), True, 'import numpy as np\n'), ((9174, 9192), 'numpy.linalg.norm', 
'np.linalg.norm', (['v2'], {}), '(v2)\n', (9188, 9192), True, 'import numpy as np\n'), ((9746, 9834), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""spins_theta1L{cos_theta1L_std:.2f}_theta12{cos_theta12_std:.2f}.png"""'], {}), "(\n f'spins_theta1L{cos_theta1L_std:.2f}_theta12{cos_theta12_std:.2f}.png')\n", (9757, 9834), True, 'import matplotlib.pyplot as plt\n'), ((9940, 9968), 'numpy.ones', 'np.ones', (['len_current_samples'], {}), '(len_current_samples)\n', (9947, 9968), True, 'import numpy as np\n'), ((10696, 10717), 'shutil.rmtree', 'shutil.rmtree', (['pltdir'], {}), '(pltdir)\n', (10709, 10717), False, 'import shutil\n'), ((11246, 11307), 'corner.corner', 'corner.corner', (['base[params]'], {'color': 'BILBY_BLUE_COLOR'}), '(base[params], **kwargs, color=BILBY_BLUE_COLOR)\n', (11259, 11307), False, 'import corner\n'), ((11437, 11540), 'corner.corner', 'corner.corner', (['compare[params]'], {'fig': 'fig', 'weights': 'normalising_weights', 'color': 'VIOLET_COLOR'}), '(compare[params], fig=fig, weights=normalising_weights, **\n kwargs, color=VIOLET_COLOR)\n', (11450, 11540), False, 'import corner\n'), ((11628, 11698), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': 'BILBY_BLUE_COLOR', 'label': '"""Isotropic Spins"""'}), "([], [], color=BILBY_BLUE_COLOR, label='Isotropic Spins')\n", (11641, 11698), True, 'import matplotlib.lines as mlines\n'), ((11745, 11904), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': 'VIOLET_COLOR', 'label': 'f"""Adjusted spins $\\\\sigma \\\\cos(12)={min_cos_theta12:.1f}, \\\\sigma \\\\cos(1L)={min_cos_theta1L:.1f}$"""'}), "([], [], color=VIOLET_COLOR, label=\n f'Adjusted spins $\\\\sigma \\\\cos(12)={min_cos_theta12:.1f}, \\\\sigma \\\\cos(1L)={min_cos_theta1L:.1f}$'\n )\n", (11758, 11904), True, 'import matplotlib.lines as mlines\n'), ((12164, 12178), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f'], {}), '(f)\n', (12175, 12178), True, 'import matplotlib.pyplot as plt\n'), ((12187, 12198), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12196, 12198), True, 'import matplotlib.pyplot as plt\n'), ((3040, 3071), 'numpy.linalg.norm', 'np.linalg.norm', (['vectors'], {'axis': '(1)'}), '(vectors, axis=1)\n', (3054, 3071), True, 'import numpy as np\n'), ((4698, 4737), 'pyvista.Sphere', 'pv.Sphere', ([], {'radius': "self.kwargs['radius']"}), "(radius=self.kwargs['radius'])\n", (4707, 4737), True, 'import pyvista as pv\n'), ((4925, 4965), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '[1, 0, 0]'}), '(direction=[1, 0, 0], **ar_kwgs)\n', (4933, 4965), True, 'import pyvista as pv\n'), ((5005, 5045), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '[0, 1, 0]'}), '(direction=[0, 1, 0], **ar_kwgs)\n', (5013, 5045), True, 'import pyvista as pv\n'), ((5097, 5137), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '[0, 0, 1]'}), '(direction=[0, 0, 1], **ar_kwgs)\n', (5105, 5137), True, 'import pyvista as pv\n'), ((1250, 1263), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1256, 1263), True, 'import numpy as np\n'), ((1270, 1283), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1276, 1283), True, 'import numpy as np\n'), ((1297, 1310), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1303, 1310), True, 'import numpy as np\n'), ((1317, 1330), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1323, 1330), True, 'import numpy as np\n'), ((1723, 1736), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1729, 1736), True, 'import numpy as np\n'), ((1743, 1756), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1749, 1756), True, 'import numpy as np\n'), ((1782, 1795), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1788, 1795), True, 'import numpy as np\n'), ((1802, 1815), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1808, 1815), True, 'import numpy as np\n')] |
from typing import List, Set
from querio.db import data_accessor as da
from querio.ml import model
from querio.service.save_service import SaveService
from querio.ml.expression.cond import Cond
from querio.ml.expression.expression import Expression
from querio.queryobject import QueryObject
from querio.service.utils import get_frequency_count
import logging
class Interface:
    """The base class through which the Querio library is used.

    It is recommended to use this class for queries, since it handles all
    necessary functions for the user.
    """

    def __init__(self, dbpath, table_name, savepath="", model_params={}):
        """Initialize Interface.

        :param dbpath: string
            The path to the database in the form
            postgres://username:password@DatabaseAddress:Port/DatabaseName
        :param table_name:
            The name of the table in the database for which this interface is created.
        :param savepath: string, optional
            The path that you wish to save the files into.
            If left blank will be the path from which the program was called.
        :param model_params: dict, optional
            A keyword arguments dict used to pass arguments to the decision tree
            model. See Scikit Learn documentation on decision tree regressors for
            accepted parameters and their function.
            NOTE(review): mutable default argument — safe only because it is
            never mutated here; consider ``model_params=None``.
        """
        self.table_name = table_name
        self.logger = logging.getLogger("QuerioInterface")
        self.accessor = da.DataAccessor(dbpath, table_name)
        self.model_params = model_params
        self.dbpath = dbpath
        # Mapping of model_name -> trained querio.ml.model.Model.
        self.models = {}
        self.columns = self.accessor.get_table_column_names()
        self.__ss__ = SaveService(savepath)
        # Pick up any previously saved models from the save path.
        self._load_models()

    def train_all(self):
        """
        Use this to train all models that return a numerical value.
        Models will contain all features.
        Does not generate models that have a string or boolean value as the return type.
        Gives all models a name generated by SaveService.generate_querio_name().
        """
        row = self.accessor.get_example_row_from_db()
        for col in self.columns:
            # Skip non-numeric targets (string/bool columns).
            if isinstance(row[col], str) or isinstance(row[col], bool):
                continue
            features = []
            for feat in self.columns:
                # NOTE(review): identity comparison on strings; works in
                # practice because both come from self.columns.
                if col is not feat:
                    features.append(feat)
            model_name = self.__ss__.generate_querio_name(col, features, "")
            if model_name not in self.models.keys():
                self.train(col,
                           features,
                           model_name)

    def train(self, query_target: str, features: list, model_name: str):
        """Trains a new model for given data using the features provided.

        Arguments:
        :param query_target: string
            The query_target of the model that will be trained.
        :param features: list of string
            The column names of features that will be trained for the model
        :param model_name
            Name of the model
        """
        self._validate_columns([query_target])
        self._validate_columns(features)
        self.logger.info("Training a model for '{}' based on '{}'"
                         .format(query_target, ", ".join(features)))
        self.models[model_name] = model.Model(
            self.accessor.get_all_data(),
            self.table_name,
            model_name,
            features,
            query_target,
            self.dbpath,
            self.model_params)
        # Persist immediately so the model survives a restart.
        self.__ss__.save_model(self.models[model_name], model_name)
        return self.models[model_name]

    def object_query(self, q_object: QueryObject, model_name=""):
        """Run new query from models using a QueryObject.

        This will run a query from an existing model,
        or if no such model is found a new model will be trained
        and a query performed with it.

        :param q_object: QueryObject
            user defined QueryObject.
        :param model_name: string
            A user can define a string for the model.
            If no such model exists it will be trained.
        :return:
            A Prediction object that contains the predicted mean and variance
            of samples matching the given conditions.
        """
        return self.expression_query(
            q_object.target, q_object.expression, model_name
        )

    def expression_query(
        self, target: str, expression: Expression, model_name=""
    ):
        """Query ``target`` under ``expression``, reusing or training a model.

        Lookup order: exact model name; a model with the same target and
        feature set (renamed to the generated name); a model whose features
        are a superset; otherwise train a new model.
        """
        # Collect the feature columns referenced by the expression tree.
        feature_names = set()
        for c in expression:
            if isinstance(c, Cond):
                feature_names.add(c.feature)
        # NOTE(review): `is ""` compares identity, not equality — works for
        # literal defaults under CPython but should be `== ""`.
        if model_name is "":
            model_name = self.__ss__.generate_querio_name(
                target, feature_names, ""
            )
        else:
            model_name = self.__ss__.generate_querio_name("", [], model_name)
        self._validate_columns(feature_names)
        if model_name in self.models:
            return self.models[model_name].query(expression)
        else:
            # NOTE(review): the loop variable shadows the imported `model`
            # module within this method's scope.
            for model in self.models.values():
                if model.output_name == target:
                    if feature_names == set(model.feature_names):
                        # Same target and exact feature set: rename the
                        # existing model (and its save file) to the new name.
                        old_name = model.model_name
                        model.model_name = model_name
                        self.__ss__.rename_querio_file(old_name, model_name)
                        self.models.pop(old_name)
                        self.models[model_name] = self.__ss__.load_model("", [], model_name)
                        self.models[model_name].model_name = model_name
                        self.__ss__.save_model(self.models[model_name], model_name)
                        return model.query(expression)
                    elif feature_names.issubset(set(model.feature_names)):
                        # A broader model can still answer this query.
                        return model.query(expression)
            self.logger.info(
                "No model for '{}' based on '{}' found. "
                "Training a new one...".format(
                    target, ", ".join(feature_names)
                )
            )
            self.train(target, feature_names, model_name)
            return self.models[model_name].query(expression)

    def query(self, target: str, conditions: List[Cond], model_name=""):
        """
        Makes a query from the name of the target feature and a list of conditions.
        The list will be treated as a AND query.

        :param target: string
        :param conditions: list[Cond]
        :return: a prediction object
        """
        if len(conditions) == 1:
            exp = conditions[0]
        else:
            if any(not isinstance(cond, Cond) for cond in conditions):
                raise TypeError("conditions must be a list of Cond")
            # Fold the list into a single AND expression.
            exp = conditions[0] & conditions[1]
            for i in range(2, len(conditions)):
                exp = exp & conditions[i]
        return self.expression_query(target, exp, model_name)

    def save_models(self):
        """Saves the models of this interface as .querio files in the path
        specified by savepath.

        These can later be loaded to another interface with the load_models
        command."""
        for model in self.get_models():
            self.__ss__.save_model(model, model.model_name)

    def get_models(self):
        """Returns the models in this interface."""
        return self.models.values()

    def _load_models(self):
        """Loads models from the savepath to the interface.

        Will only load models that are from a table with the same name as
        current and with the same columns.
        Will ignore any files that do not belong to current table.
        If two tables share same table name and same column names it will
        load the model."""
        names = self.__ss__.get_querio_files()
        for n in names:
            try:
                mod = self.__ss__.load_file(n)
                features = mod.feature_names
                output = mod.output_name
                # Raises QuerioColumnError for models from other tables.
                self._validate_columns(features)
                self._validate_columns([output])
                # NOTE(review): feature_names is built but never used here.
                feature_names = ""
                for s in features:
                    feature_names += s
                self.models[n] = mod
            except QuerioColumnError:
                self.logger.error("""Encountered an error when loading file
                    '{}'. This model could not be loaded"""
                                  .format(n))
                continue

    def retrain_models(self):
        """Takes all the models in this interface and retrains them."""
        for m in self.get_models():
            features = m.get_feature_names()
            output = m.output_name
            name = m.model_name
            self.train(output, features, name)

    def clear_models(self):
        """Clears the models in this interface.

        Will not delete the save files, but will remove any models in this
        interface instance."""
        self.models = {}

    def clear_saved_models(self):
        """Removes all save files from the save path.

        Will not remove files stored in any interface instance, but will
        remove all save files."""
        self.logger.debug("Clearing all the Querio-files...")
        self.__ss__.clear_querio_files()

    def get_saved_models(self):
        """
        :return: A list containing the names of all save files.
        """
        return self.__ss__.get_querio_files()

    def frequency(self, values):
        """Return frequency counts of ``values`` (column name or list of names)."""
        data = self.accessor.get_all_data()
        if type(values) != list:
            values = [values]
        self._validate_columns(values)
        return get_frequency_count(data, values)

    def list_columns(self):
        """Return the column names of the interfaced table."""
        return self.columns

    def _validate_columns(self, to_check: List[str]):
        """Raise QuerioColumnError if any name in ``to_check`` is not a column."""
        for check in to_check:
            if check not in self.columns:
                self.logger.error("No column called '{}' in database"
                                  .format(check))
                raise QuerioColumnError(
                    "No column called {} in database".format(check))
class QuerioColumnError(Exception):
    """Raised when a requested column does not exist in the database table.

    BUGFIX: the original called ``Exception.__init__(self, args, kwargs)``,
    passing the whole args tuple and kwargs dict as two positional
    arguments, so ``str(e)`` rendered ``(('msg',), {})`` instead of the
    message. Forwarding ``*args`` restores normal Exception behaviour
    while keeping the ``(*args, **kwargs)`` call signature.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args)
| [
"logging.getLogger",
"querio.db.data_accessor.DataAccessor",
"querio.service.save_service.SaveService",
"querio.service.utils.get_frequency_count",
"querio.ml.model.query"
] | [((1483, 1519), 'logging.getLogger', 'logging.getLogger', (['"""QuerioInterface"""'], {}), "('QuerioInterface')\n", (1500, 1519), False, 'import logging\n'), ((1544, 1579), 'querio.db.data_accessor.DataAccessor', 'da.DataAccessor', (['dbpath', 'table_name'], {}), '(dbpath, table_name)\n', (1559, 1579), True, 'from querio.db import data_accessor as da\n'), ((1759, 1780), 'querio.service.save_service.SaveService', 'SaveService', (['savepath'], {}), '(savepath)\n', (1770, 1780), False, 'from querio.service.save_service import SaveService\n'), ((9981, 10014), 'querio.service.utils.get_frequency_count', 'get_frequency_count', (['data', 'values'], {}), '(data, values)\n', (10000, 10014), False, 'from querio.service.utils import get_frequency_count\n'), ((6015, 6038), 'querio.ml.model.query', 'model.query', (['expression'], {}), '(expression)\n', (6026, 6038), False, 'from querio.ml import model\n'), ((6145, 6168), 'querio.ml.model.query', 'model.query', (['expression'], {}), '(expression)\n', (6156, 6168), False, 'from querio.ml import model\n')] |
import json
from requests.models import Response
from types import SimpleNamespace
class ContentType:
    """One content type returned by the API, plus the raw HTTP response.

    System metadata (id, name, codename, last_modified) is lifted to
    attributes; ``elements`` is round-tripped through JSON so nested dicts
    become attribute-accessible ``SimpleNamespace`` objects.
    """

    def __init__(self, system: dict, elements: dict, api_response: Response):
        for attr in ("id", "name", "codename", "last_modified"):
            setattr(self, attr, system[attr])
        serialized = json.dumps(elements)
        self.elements = json.loads(
            serialized, object_hook=lambda item: SimpleNamespace(**item))
        self.api_response = api_response
class ContentTypeListing:
    """Paginated collection of content types plus the raw HTTP response.

    Pagination fields (skip, limit, count, next_page) are both kept as the
    original dict and lifted to attributes for convenience.
    """

    def __init__(self, content_types: list, pagination: dict,
                 api_response: Response):
        self.types = content_types
        self.pagination = pagination
        for key in ("skip", "limit", "count", "next_page"):
            setattr(self, key, pagination[key])
        self.api_response = api_response
| [
"json.dumps",
"types.SimpleNamespace"
] | [((379, 399), 'json.dumps', 'json.dumps', (['elements'], {}), '(elements)\n', (389, 399), False, 'import json\n'), ((458, 478), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**d)\n', (473, 478), False, 'from types import SimpleNamespace\n')] |
from ndl_tense.data_preparation import create_sentence_file,annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues
#below import commented out for now, uncomment if you want to run step 6
from ndl_tense.simulations import ndl_model
#from ndl_tense.post_processing import top_cues_for_sen, sample_sentences
from ndl_tense import file_tools
from param_file import *
from os import chdir
import logging
logging.basicConfig(level=logging.INFO)
def step_1():
    """Step 1: extract one-sentence-per-line data from the tagged corpus."""
    # Create the output directory tree (False => the listed paths are dirs).
    file_tools.manage_directories(EXTRACT_SENTENCES_DIRS, False)
    chdir(WD_EXTRACT)  # work relative to the extraction directory
    # True => the listed paths are files, so their parent folders are created.
    file_tools.manage_directories(EXTRACT_SENTENCES_FILES, True)
    # Drop colloquial token:tag pairs ("gon", "wan", "innit") from the corpus.
    # Trailing flags per the original notes -- TODO confirm against
    # create_sentence_file.run's signature: create .tsv output=False,
    # keep original sentence=False, verbose=True.
    create_sentence_file.run(EXTRACT_SENTENCES_FILES, {"gon":"VVG", "wan":"VVB", "innit":"VBB"}, False, False, True)
def step_2():
    """Step 2: annotate the extracted sentences with tense labels."""
    # Create the output directory tree (False => the listed paths are dirs).
    file_tools.manage_directories(ANNOTATE_DIRS, False)
    chdir(WD_ANNOTATE)  # work relative to the annotation directory
    # True => the listed paths are files, so their parent folders are created.
    file_tools.manage_directories(ANNOTATE_FILES, True)
    # Final argument is verbosity.
    annotate_tenses.run(ANNOTATE_FILES, True)
def step_3():
    """Step 3: prepare the data set and split it into NDL train/valid/test events."""
    file_tools.manage_directories(PREPDAT_DIRS, False)
    chdir(WD_PREPDAT)
    file_tools.manage_directories(PREPDAT_FILES, True)
    file_tools.manage_directories(PREPARE_TRAIN_VALID_TEST_FILES, True)
    # Optional down-sampling step, kept for reference:
    # sample_sentences.run(TENSES_ONE_SENT_PER_VERB_WITH_MODALS, kets, ratios, 500, False)
    file_tools.manage_directories(CREATE_TRAIN_VALID_TEST_FILES, True)
    # Final argument of each run() call below is verbosity.
    prepare_data.run(PREPDAT_FILES, True)
    chdir(WD_PREPDAT)  # NOTE(review): already the cwd -- second chdir looks redundant
    # Split into train/validation/test according to the configured proportions.
    prepare_ndl_events.prepare_files(CREATE_TRAIN_VALID_TEST_FILES, PROP_TEST, PROP_VALID, True)
    prepare_ndl_events.run(PREPARE_TRAIN_VALID_TEST_FILES, 'NgramCuesWithInfinitive', True)
def step_4():
    """Step 4: extract the infinitive form for each verb."""
    file_tools.manage_directories(EXTRACT_SENTENCES_FOLDERS, False)
    file_tools.manage_directories(EXTRACT_INFINITIVE_FILES, True)
    chdir(WD_EXTRACT_INF)
    # Final argument is verbosity.
    extract_infinitive.run(EXTRACT_INFINITIVE_FILES, True)
def step_5():
    """Step 5: extract n-grams from the corpus and keep the top-K as cues."""
    file_tools.manage_directories(NGRAM_FOLDERS, False)
    chdir(WD_EXTRACT_NGRAM)
    file_tools.manage_directories(NGRAM_FILES, True)
    file_tools.manage_directories(TARGETS_FILES, True)
    # Frequency-based extraction is optional, but its output feeds the next call.
    extract_ngrams.run(TENSES_GZ, NGRAM_FILES, TEMP_DIR_EXT, NUM_THREADS)
    # Final argument is verbosity.
    prepare_ngrams.run(NGRAM_FILES, K_NGRAMS, TARGETS_FILES, False)
def step_6():
    """Step 6: combine n-grams and infinitives into the final cue set."""
    file_tools.manage_directories([WD_CUES], False)
    chdir(WD_CUES)
    # Final argument is verbosity.
    prepare_cues.run(NGRAMS, INFINITIVES, ALL_CUES, True)
def step_7():
    """Step 7: run the NDL model simulation (needs the ndl_model import at the top)."""
    file_tools.manage_directories(SIM_DIR, False)
    chdir(WD_SIM)
    ndl_model.run(SIM_FILES, SIM_PARAMS)
def main():
    """Run the selected pipeline steps -- uncomment the ones you need."""
    # step_1()
    # step_2()
    # step_3()
    # step_4()
    # step_5()
    # step_6()
    step_7()  # requires the ndl_model import to be uncommented at the top
if __name__ == "__main__":
main() | [
"logging.basicConfig",
"ndl_tense.data_preparation.annotate_tenses.run",
"ndl_tense.data_preparation.prepare_ndl_events.prepare_files",
"ndl_tense.data_preparation.create_sentence_file.run",
"ndl_tense.data_preparation.prepare_ngrams.run",
"ndl_tense.simulations.ndl_model.run",
"os.chdir",
"ndl_tense.... | [((462, 501), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (481, 501), False, 'import logging\n'), ((597, 657), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['EXTRACT_SENTENCES_DIRS', '(False)'], {}), '(EXTRACT_SENTENCES_DIRS, False)\n', (626, 657), False, 'from ndl_tense import file_tools\n'), ((662, 679), 'os.chdir', 'chdir', (['WD_EXTRACT'], {}), '(WD_EXTRACT)\n', (667, 679), False, 'from os import chdir\n'), ((899, 959), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['EXTRACT_SENTENCES_FILES', '(True)'], {}), '(EXTRACT_SENTENCES_FILES, True)\n', (928, 959), False, 'from ndl_tense import file_tools\n'), ((1752, 1871), 'ndl_tense.data_preparation.create_sentence_file.run', 'create_sentence_file.run', (['EXTRACT_SENTENCES_FILES', "{'gon': 'VVG', 'wan': 'VVB', 'innit': 'VBB'}", '(False)', '(False)', '(True)'], {}), "(EXTRACT_SENTENCES_FILES, {'gon': 'VVG', 'wan':\n 'VVB', 'innit': 'VBB'}, False, False, True)\n", (1776, 1871), False, 'from ndl_tense.data_preparation import create_sentence_file, annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues\n'), ((1951, 2002), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['ANNOTATE_DIRS', '(False)'], {}), '(ANNOTATE_DIRS, False)\n', (1980, 2002), False, 'from ndl_tense import file_tools\n'), ((2007, 2025), 'os.chdir', 'chdir', (['WD_ANNOTATE'], {}), '(WD_ANNOTATE)\n', (2012, 2025), False, 'from os import chdir\n'), ((2236, 2287), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['ANNOTATE_FILES', '(True)'], {}), '(ANNOTATE_FILES, True)\n', (2265, 2287), False, 'from ndl_tense import file_tools\n'), ((2392, 2433), 'ndl_tense.data_preparation.annotate_tenses.run', 'annotate_tenses.run', (['ANNOTATE_FILES', '(True)'], {}), '(ANNOTATE_FILES, True)\n', (2411, 2433), False, 
'from ndl_tense.data_preparation import create_sentence_file, annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues\n'), ((2453, 2503), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['PREPDAT_DIRS', '(False)'], {}), '(PREPDAT_DIRS, False)\n', (2482, 2503), False, 'from ndl_tense import file_tools\n'), ((2508, 2525), 'os.chdir', 'chdir', (['WD_PREPDAT'], {}), '(WD_PREPDAT)\n', (2513, 2525), False, 'from os import chdir\n'), ((2530, 2580), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['PREPDAT_FILES', '(True)'], {}), '(PREPDAT_FILES, True)\n', (2559, 2580), False, 'from ndl_tense import file_tools\n'), ((2585, 2652), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['PREPARE_TRAIN_VALID_TEST_FILES', '(True)'], {}), '(PREPARE_TRAIN_VALID_TEST_FILES, True)\n', (2614, 2652), False, 'from ndl_tense import file_tools\n'), ((2863, 2929), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['CREATE_TRAIN_VALID_TEST_FILES', '(True)'], {}), '(CREATE_TRAIN_VALID_TEST_FILES, True)\n', (2892, 2929), False, 'from ndl_tense import file_tools\n'), ((2935, 2972), 'ndl_tense.data_preparation.prepare_data.run', 'prepare_data.run', (['PREPDAT_FILES', '(True)'], {}), '(PREPDAT_FILES, True)\n', (2951, 2972), False, 'from ndl_tense.data_preparation import create_sentence_file, annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues\n'), ((2977, 2994), 'os.chdir', 'chdir', (['WD_PREPDAT'], {}), '(WD_PREPDAT)\n', (2982, 2994), False, 'from os import chdir\n'), ((2999, 3095), 'ndl_tense.data_preparation.prepare_ndl_events.prepare_files', 'prepare_ndl_events.prepare_files', (['CREATE_TRAIN_VALID_TEST_FILES', 'PROP_TEST', 'PROP_VALID', '(True)'], {}), '(CREATE_TRAIN_VALID_TEST_FILES, PROP_TEST,\n PROP_VALID, True)\n', (3031, 3095), False, 'from 
ndl_tense.data_preparation import create_sentence_file, annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues\n'), ((3197, 3288), 'ndl_tense.data_preparation.prepare_ndl_events.run', 'prepare_ndl_events.run', (['PREPARE_TRAIN_VALID_TEST_FILES', '"""NgramCuesWithInfinitive"""', '(True)'], {}), "(PREPARE_TRAIN_VALID_TEST_FILES,\n 'NgramCuesWithInfinitive', True)\n", (3219, 3288), False, 'from ndl_tense.data_preparation import create_sentence_file, annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues\n'), ((3304, 3367), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['EXTRACT_SENTENCES_FOLDERS', '(False)'], {}), '(EXTRACT_SENTENCES_FOLDERS, False)\n', (3333, 3367), False, 'from ndl_tense import file_tools\n'), ((3372, 3433), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['EXTRACT_INFINITIVE_FILES', '(True)'], {}), '(EXTRACT_INFINITIVE_FILES, True)\n', (3401, 3433), False, 'from ndl_tense import file_tools\n'), ((3438, 3459), 'os.chdir', 'chdir', (['WD_EXTRACT_INF'], {}), '(WD_EXTRACT_INF)\n', (3443, 3459), False, 'from os import chdir\n'), ((3564, 3618), 'ndl_tense.data_preparation.extract_infinitive.run', 'extract_infinitive.run', (['EXTRACT_INFINITIVE_FILES', '(True)'], {}), '(EXTRACT_INFINITIVE_FILES, True)\n', (3586, 3618), False, 'from ndl_tense.data_preparation import create_sentence_file, annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues\n'), ((3638, 3689), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['NGRAM_FOLDERS', '(False)'], {}), '(NGRAM_FOLDERS, False)\n', (3667, 3689), False, 'from ndl_tense import file_tools\n'), ((3694, 3717), 'os.chdir', 'chdir', (['WD_EXTRACT_NGRAM'], {}), '(WD_EXTRACT_NGRAM)\n', (3699, 3717), False, 'from os import chdir\n'), ((3722, 3770), 
'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['NGRAM_FILES', '(True)'], {}), '(NGRAM_FILES, True)\n', (3751, 3770), False, 'from ndl_tense import file_tools\n'), ((3775, 3825), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['TARGETS_FILES', '(True)'], {}), '(TARGETS_FILES, True)\n', (3804, 3825), False, 'from ndl_tense import file_tools\n'), ((3880, 3949), 'ndl_tense.data_preparation.extract_ngrams.run', 'extract_ngrams.run', (['TENSES_GZ', 'NGRAM_FILES', 'TEMP_DIR_EXT', 'NUM_THREADS'], {}), '(TENSES_GZ, NGRAM_FILES, TEMP_DIR_EXT, NUM_THREADS)\n', (3898, 3949), False, 'from ndl_tense.data_preparation import create_sentence_file, annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues\n'), ((4058, 4121), 'ndl_tense.data_preparation.prepare_ngrams.run', 'prepare_ngrams.run', (['NGRAM_FILES', 'K_NGRAMS', 'TARGETS_FILES', '(False)'], {}), '(NGRAM_FILES, K_NGRAMS, TARGETS_FILES, False)\n', (4076, 4121), False, 'from ndl_tense.data_preparation import create_sentence_file, annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues\n'), ((4141, 4188), 'ndl_tense.file_tools.manage_directories', 'file_tools.manage_directories', (['[WD_CUES]', '(False)'], {}), '([WD_CUES], False)\n', (4170, 4188), False, 'from ndl_tense import file_tools\n'), ((4193, 4207), 'os.chdir', 'chdir', (['WD_CUES'], {}), '(WD_CUES)\n', (4198, 4207), False, 'from os import chdir\n'), ((4316, 4369), 'ndl_tense.data_preparation.prepare_cues.run', 'prepare_cues.run', (['NGRAMS', 'INFINITIVES', 'ALL_CUES', '(True)'], {}), '(NGRAMS, INFINITIVES, ALL_CUES, True)\n', (4332, 4369), False, 'from ndl_tense.data_preparation import create_sentence_file, annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues\n'), ((4389, 4434), 'ndl_tense.file_tools.manage_directories', 
'file_tools.manage_directories', (['SIM_DIR', '(False)'], {}), '(SIM_DIR, False)\n', (4418, 4434), False, 'from ndl_tense import file_tools\n'), ((4439, 4452), 'os.chdir', 'chdir', (['WD_SIM'], {}), '(WD_SIM)\n', (4444, 4452), False, 'from os import chdir\n'), ((4457, 4493), 'ndl_tense.simulations.ndl_model.run', 'ndl_model.run', (['SIM_FILES', 'SIM_PARAMS'], {}), '(SIM_FILES, SIM_PARAMS)\n', (4470, 4493), False, 'from ndl_tense.simulations import ndl_model\n')] |
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
import json
import cgi
class Server(BaseHTTPRequestHandler):
    """Minimal JSON endpoint backed by the ./cafes.json counter file.

    GET  -> replies with the current counter value as JSON.
    POST -> decrements the counter, persists it, replies with the new value.
    All responses carry permissive CORS headers so a browser front-end on a
    different origin can call this demo server.
    """

    def _set_headers(self):
        """200 + JSON content type + CORS headers (used by HEAD)."""
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
        self.end_headers()

    def _send_cors_preamble(self):
        """Send the 200 status plus the CORS headers shared by GET and POST."""
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header("Access-Control-Allow-Headers", "Content-Type")
        self.end_headers()

    def _read_count(self):
        """Return the counter stored in ./cafes.json (a single JSON number)."""
        with open('./cafes.json') as f:
            return json.load(f)

    def do_HEAD(self):
        print("head")
        self._set_headers()

    def do_GET(self):
        """Reply with the current cafe counter as JSON."""
        print("get")
        self._send_cors_preamble()
        cafe_num = self._read_count()
        self.wfile.write(json.dumps(cafe_num))

    def do_POST(self):
        """Decrement the counter, persist it, and echo the new value.

        Bug fix: the original rebound ``f`` to a second, write-mode handle
        inside the read ``with`` block, so the write handle was never
        explicitly closed or flushed. Read and write now use separate
        context managers. The meaningless junk return value was dropped
        (BaseHTTPRequestHandler ignores handler return values).
        """
        self._send_cors_preamble()
        cafe_num = self._read_count() - 1
        with open("./cafes.json", "w") as f:
            f.write(str(cafe_num))
        self.wfile.write(json.dumps(cafe_num))

    def do_OPTIONS(self):
        # NOTE(review): the preflight only advertises 'GET, OPTIONS' although
        # POST is served -- harmless for simple POSTs, but confirm intent.
        print('options')
        self.send_response(200, "ok")
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, OPTIONS')
        self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
        self.send_header("Access-Control-Allow-Headers", "Content-Type")
        self.end_headers()
def run(server_class=HTTPServer, handler_class=Server, port=8008):
    """Start a blocking HTTP server on ``port`` with the given handler class."""
    httpd = server_class(('', port), handler_class)
    print('Starting httpd on port %d...' % port)
    httpd.serve_forever()
if __name__ == "__main__":
    from sys import argv
    # Optional CLI argument: the port number to listen on (defaults to 8008).
    if len(argv) == 2:
        run(port=int(argv[1]))
    else:
        run()
| [
"json.load",
"json.dumps"
] | [((1067, 1079), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1076, 1079), False, 'import json\n'), ((1105, 1125), 'json.dumps', 'json.dumps', (['cafe_num'], {}), '(cafe_num)\n', (1115, 1125), False, 'import json\n'), ((1623, 1635), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1632, 1635), False, 'import json\n'), ((1759, 1779), 'json.dumps', 'json.dumps', (['cafe_num'], {}), '(cafe_num)\n', (1769, 1779), False, 'import json\n')] |
# Copyright 2018 luozhouyang.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from . import collections
from . import utils
class InitHook(tf.train.SessionRunHook):
    """Runs table and global-variable initializers once the session exists."""

    def after_create_session(self, session, coord):
        # Tables first, then variables -- same order as the graph collections.
        for collection_key in (tf.GraphKeys.TABLE_INITIALIZERS,
                               tf.GraphKeys.GLOBAL_VARIABLES):
            session.run(tf.get_collection(collection_key))
class SaveEvaluationPredictionHook(tf.train.SessionRunHook):
    """Session hook that appends predicted tag sequences to a file during evaluation.

    One output file is written per global step, named
    ``<output_file>.<global_step>``.
    """

    def __init__(self, output_file):
        # Base path; the current global step is appended per evaluation run.
        self._output_file = output_file
        self._predictions = None
        self._global_steps = None

    def begin(self):
        self._predictions = collections.get_dict_from_collection("predictions")
        if not self._predictions:
            # NOTE(review): returning here leaves _global_steps as None while
            # before_run still requests both tensors -- confirm this hook is
            # only attached when predictions exist in the graph.
            return
        self._global_steps = tf.train.get_global_step()
        if not self._global_steps:
            raise ValueError("You must create global_steps first.")

    def before_run(self, run_context):
        # Ask the session to also evaluate the predictions and the step counter.
        return tf.train.SessionRunArgs([self._predictions, self._global_steps])

    def after_run(self,
                  run_context,  # pylint: disable=unused-argument
                  run_values):
        predictions, global_steps = run_values.results
        # Append mode, so multiple batches of one evaluation share a file.
        output_path = "{}.{}".format(self._output_file, global_steps)
        with open(output_path, encoding="utf8", mode="a") as file:
            tags = predictions['predict_tags']
            for t in tags:
                tag_string = utils.convert_prediction_tags_to_string(t)
                file.write(tag_string + "\n")

    def end(self, session):
        tf.logging.info(
            "Evaluation predictions saved to %s" % self._output_file)
class TrainSummaryHook(tf.train.SessionRunHook):
    """Writes the training loss collection to TensorBoard after every step."""

    def __init__(self, output_dir):
        self._logdir = output_dir
        self._writer = None

    def begin(self):
        self._loss_tensors = tf.get_collection(collections.TRAIN_LOSS)
        self._step_tensor = tf.train.get_global_step()
        assert self._step_tensor is not None
        self._writer = tf.summary.FileWriter(self._logdir)

    def before_run(self, run_context):
        # Evaluate the loss collection and the step counter alongside the run.
        return tf.train.SessionRunArgs([self._loss_tensors, self._step_tensor])

    def after_run(self, run_context, run_values):  # pylint: disable=unused-argument
        loss, step = run_values.results
        value = tf.Summary.Value(tag=collections.TRAIN_LOSS,
                                 simple_value=loss[0])
        self._writer.add_summary(tf.Summary(value=[value]), step)
class EvalSummaryHook(tf.train.SessionRunHook):
    """Writes the evaluation loss collection to TensorBoard after every step."""

    def __init__(self, output_dir):
        self._logdir = output_dir
        self._writer = None

    def begin(self):
        self._loss_tensors = tf.get_collection(collections.EVAL_LOSS)
        self._step_tensor = tf.train.get_global_step()
        assert self._step_tensor is not None
        self._writer = tf.summary.FileWriter(self._logdir)

    def before_run(self, run_context):
        # Evaluate the loss collection and the step counter alongside the run.
        return tf.train.SessionRunArgs([self._loss_tensors, self._step_tensor])

    def after_run(self, run_context, run_values):  # pylint: disable=unused-argument
        loss, step = run_values.results
        value = tf.Summary.Value(tag=collections.EVAL_LOSS,
                                 simple_value=loss[0])
        self._writer.add_summary(tf.Summary(value=[value]), step)
| [
"tensorflow.logging.info",
"tensorflow.train.SessionRunArgs",
"tensorflow.train.get_global_step",
"tensorflow.Summary.Value",
"tensorflow.summary.FileWriter",
"tensorflow.get_collection"
] | [((849, 899), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TABLE_INITIALIZERS'], {}), '(tf.GraphKeys.TABLE_INITIALIZERS)\n', (866, 899), True, 'import tensorflow as tf\n'), ((962, 1010), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (979, 1010), True, 'import tensorflow as tf\n'), ((1441, 1467), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (1465, 1467), True, 'import tensorflow as tf\n'), ((1626, 1690), 'tensorflow.train.SessionRunArgs', 'tf.train.SessionRunArgs', (['[self._predictions, self._global_steps]'], {}), '([self._predictions, self._global_steps])\n', (1649, 1690), True, 'import tensorflow as tf\n'), ((2234, 2307), 'tensorflow.logging.info', 'tf.logging.info', (["('Evaluation predictions saved to %s' % self._output_file)"], {}), "('Evaluation predictions saved to %s' % self._output_file)\n", (2249, 2307), True, 'import tensorflow as tf\n'), ((2532, 2573), 'tensorflow.get_collection', 'tf.get_collection', (['collections.TRAIN_LOSS'], {}), '(collections.TRAIN_LOSS)\n', (2549, 2573), True, 'import tensorflow as tf\n'), ((2602, 2628), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (2626, 2628), True, 'import tensorflow as tf\n'), ((2705, 2744), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self._output_dir'], {}), '(self._output_dir)\n', (2726, 2744), True, 'import tensorflow as tf\n'), ((2800, 2862), 'tensorflow.train.SessionRunArgs', 'tf.train.SessionRunArgs', (['[self._train_loss, self._global_step]'], {}), '([self._train_loss, self._global_step])\n', (2823, 2862), True, 'import tensorflow as tf\n'), ((3468, 3508), 'tensorflow.get_collection', 'tf.get_collection', (['collections.EVAL_LOSS'], {}), '(collections.EVAL_LOSS)\n', (3485, 3508), True, 'import tensorflow as tf\n'), ((3538, 3564), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), 
'()\n', (3562, 3564), True, 'import tensorflow as tf\n'), ((3642, 3681), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self._output_dir'], {}), '(self._output_dir)\n', (3663, 3681), True, 'import tensorflow as tf\n'), ((3737, 3799), 'tensorflow.train.SessionRunArgs', 'tf.train.SessionRunArgs', (['[self._eval_loss, self._global_steps]'], {}), '([self._eval_loss, self._global_steps])\n', (3760, 3799), True, 'import tensorflow as tf\n'), ((3086, 3152), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'collections.TRAIN_LOSS', 'simple_value': 'loss[0]'}), '(tag=collections.TRAIN_LOSS, simple_value=loss[0])\n', (3102, 3152), True, 'import tensorflow as tf\n'), ((4024, 4089), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'collections.EVAL_LOSS', 'simple_value': 'loss[0]'}), '(tag=collections.EVAL_LOSS, simple_value=loss[0])\n', (4040, 4089), True, 'import tensorflow as tf\n')] |
import sys
import pytest
from takler.core import Limit, Flow, Task
from takler.visitor import pre_order_travel, PrintVisitor
def test_create_limit():
    """A freshly built Limit reports its name, capacity and a zero value."""
    limit = Limit("post_limit", 20)
    assert limit.name == "post_limit"
    assert limit.limit == 20
    assert limit.value == 0
def test_limit_increment():
    """Each increment call registers exactly one more node path."""
    limit = Limit("post_limit", 2)
    assert len(limit.node_paths) == 0
    for count, path in enumerate(("/flow1/task1", "/flow1/task2"), start=1):
        limit.increment(1, path)
        assert len(limit.node_paths) == count
def test_limit_decrement():
    """Decrement removes registered node paths one by one down to empty."""
    limit = Limit("post_limit", 2)
    assert len(limit.node_paths) == 0
    paths = ("/flow1/task1", "/flow1/task2")
    for path in paths:
        limit.increment(1, path)
    assert len(limit.node_paths) == 2
    for remaining, path in zip((1, 0), paths):
        limit.decrement(1, path)
        assert len(limit.node_paths) == remaining
def test_limit_in_limit():
    """in_limit(n) holds only while n more tokens still fit under the cap."""
    limit = Limit("post_limit", 2)

    def fits(n):
        return bool(limit.in_limit(n))

    assert [fits(n) for n in (1, 2, 3)] == [True, True, False]
    limit.increment(1, "/flow1/task1")
    assert [fits(n) for n in (1, 2, 3)] == [True, False, False]
    limit.increment(1, "/flow1/task2")
    assert [fits(n) for n in (1, 2, 3)] == [False, False, False]
class ObjectContainer:
    """Plain attribute bag: fixtures attach flow/container/task nodes to it."""
    pass
@pytest.fixture
def flow_with_limit():
    """Build and return an ObjectContainer exposing this node tree:

    flow1                (limit limit1=2, limit limit2=1, inlimit limit1)
      container1
        task1
        task2
      container2         (inlimit limit2)
        task3
        task4
    """
    oc = ObjectContainer()
    with Flow("flow1") as flow1:
        flow1.add_limit("limit1", 2)
        flow1.add_limit("limit2", 1)
        flow1.add_in_limit("limit1")
        with flow1.add_container("container1") as container1:
            with container1.add_task("task1") as task1:
                pass
            with container1.add_task("task2") as task2:
                pass
        with flow1.add_container("container2") as container2:
            container2.add_in_limit("limit2")
            with container2.add_task("task3") as task3:
                pass
            with container2.add_task("task4") as task4:
                pass
    oc.flow1, oc.container1, oc.container2 = flow1, container1, container2
    oc.task1, oc.task2, oc.task3, oc.task4 = task1, task2, task3, task4
    return oc
def test_flow_with_limit(flow_with_limit):
    """Smoke test: printing the whole tree must not raise."""
    visitor = PrintVisitor(sys.stdout)
    pre_order_travel(flow_with_limit.flow1, visitor)
def test_manual_run(flow_with_limit):
    """Walk tasks 1-4 through run/complete and check both limit counters.

    limit1 (cap 2) is inherited by every task via the flow-level inlimit;
    limit2 (cap 1) applies only to container2's tasks (task3, task4).
    """
    # pre_order_travel(flow_with_limit.flow1, SimplePrintVisitor())
    flow1: Flow = flow_with_limit.flow1
    task1: Task = flow_with_limit.task1
    task2: Task = flow_with_limit.task2
    task3: Task = flow_with_limit.task3
    task4: Task = flow_with_limit.task4
    limit1 = flow1.find_limit("limit1")
    assert limit1 is not None
    limit2 = flow1.find_limit("limit2")
    assert limit2 is not None
    flow1.requeue()
    assert limit1.value == 0
    assert limit2.value == 0
    # Task1 run: consumes one limit1 token; init does not consume more.
    assert task1.check_in_limit_up()
    task1.run()
    assert limit1.value == 1
    assert limit2.value == 0
    task1.init("1001")
    assert limit1.value == 1
    assert limit2.value == 0
    # Task2 run: limit1 is now full, so task3 is blocked.
    assert task2.check_in_limit_up()
    task2.run()
    assert limit1.value == 2
    assert limit2.value == 0
    assert not task3.check_in_limit_up()
    # Task1 finish: frees a limit1 token, unblocking task3.
    task1.complete()
    assert limit1.value == 1
    assert limit2.value == 0
    assert task3.check_in_limit_up()
    # Task3 run: takes limit1 and the single limit2 token, blocking task4.
    task3.run()
    assert limit1.value == 2
    assert limit2.value == 1
    assert not task4.check_in_limit_up()
    # Task2 complete: frees limit1 only; task4 still blocked by limit2.
    task2.complete()
    assert limit1.value == 1
    assert limit2.value == 1
    assert not task4.check_in_limit_up()
    # Task3 complete: frees both counters, unblocking task4.
    task3.complete()
    assert limit1.value == 0
    assert limit2.value == 0
    assert task4.check_in_limit_up()
    # Task4 run takes both tokens again ...
    task4.run()
    assert limit1.value == 1
    assert limit2.value == 1
    # ... and its completion returns both limits to zero.
    task4.complete()
    assert limit1.value == 0
    assert limit2.value == 0
| [
"takler.core.Flow",
"takler.core.Limit",
"takler.visitor.PrintVisitor"
] | [((217, 247), 'takler.core.Limit', 'Limit', (['limit_name', 'limit_count'], {}), '(limit_name, limit_count)\n', (222, 247), False, 'from takler.core import Limit, Flow, Task\n'), ((392, 414), 'takler.core.Limit', 'Limit', (['"""post_limit"""', '(2)'], {}), "('post_limit', 2)\n", (397, 414), False, 'from takler.core import Limit, Flow, Task\n'), ((651, 673), 'takler.core.Limit', 'Limit', (['"""post_limit"""', '(2)'], {}), "('post_limit', 2)\n", (656, 673), False, 'from takler.core import Limit, Flow, Task\n'), ((1025, 1047), 'takler.core.Limit', 'Limit', (['"""post_limit"""', '(2)'], {}), "('post_limit', 2)\n", (1030, 1047), False, 'from takler.core import Limit, Flow, Task\n'), ((1802, 1815), 'takler.core.Flow', 'Flow', (['"""flow1"""'], {}), "('flow1')\n", (1806, 1815), False, 'from takler.core import Limit, Flow, Task\n'), ((2670, 2694), 'takler.visitor.PrintVisitor', 'PrintVisitor', (['sys.stdout'], {}), '(sys.stdout)\n', (2682, 2694), False, 'from takler.visitor import pre_order_travel, PrintVisitor\n')] |
from flask import Flask, jsonify
from frame import FlaskGRPCPool
from sample.company_pb2_grpc import CompanyServerStub
from google.protobuf.empty_pb2 import Empty
app = Flask(__name__)
flask_grpc = FlaskGRPCPool(app)
# Register a named gRPC channel; it is exposed afterwards as the
# ``flask_grpc.company_connection`` attribute.
flask_grpc.register("company_connection", host="localhost", port=9100)
@app.route("/company")
def get_all_company():
stub = flask_grpc.company_connection.get_stub(CompanyServerStub)
result = stub.GetAllCompany(Empty())
return "ok"
if __name__ == '__main__':
    # Flask development server only; not for production use.
    app.run(debug=True)
| [
"frame.FlaskGRPCPool",
"google.protobuf.empty_pb2.Empty",
"flask.Flask"
] | [((171, 186), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (176, 186), False, 'from flask import Flask, jsonify\n'), ((200, 218), 'frame.FlaskGRPCPool', 'FlaskGRPCPool', (['app'], {}), '(app)\n', (213, 218), False, 'from frame import FlaskGRPCPool\n'), ((440, 447), 'google.protobuf.empty_pb2.Empty', 'Empty', ([], {}), '()\n', (445, 447), False, 'from google.protobuf.empty_pb2 import Empty\n')] |
#!/usr/bin/env python3
import logging
import datetime
import sys
from ringConnector import core
# Root-logger level is set globally so library loggers also emit INFO.
logging.getLogger().setLevel(logging.INFO)
# Enumerate the Ring devices linked to the configured account.
devices = core.listAllDevices()
logging.info(f"found devices {devices}")
print ('Argument List:', str(sys.argv))
# Optional CLI argument: an ISO date (YYYY-MM-DD) selecting which day's
# "ding" videos to download; without it, today's videos are fetched.
if len(sys.argv) > 1:
    dayToDownloadParam = sys.argv[1]
    logging.info(f"will download {dayToDownloadParam}")
    dayToDownload = datetime.datetime.strptime(dayToDownloadParam, '%Y-%m-%d').date()
    res = core.downloadDaysDingVideos(dayToDownload = dayToDownload)
else:
    logging.info('will download today')
    res = core.downloadDaysDingVideos()
logging.info(res) | [
"logging.getLogger",
"datetime.datetime.strptime",
"ringConnector.core.listAllDevices",
"ringConnector.core.downloadDaysDingVideos",
"logging.info"
] | [((153, 174), 'ringConnector.core.listAllDevices', 'core.listAllDevices', ([], {}), '()\n', (172, 174), False, 'from ringConnector import core\n'), ((175, 215), 'logging.info', 'logging.info', (['f"""found devices {devices}"""'], {}), "(f'found devices {devices}')\n", (187, 215), False, 'import logging\n'), ((617, 634), 'logging.info', 'logging.info', (['res'], {}), '(res)\n', (629, 634), False, 'import logging\n'), ((321, 372), 'logging.info', 'logging.info', (['f"""will download {dayToDownloadParam}"""'], {}), "(f'will download {dayToDownloadParam}')\n", (333, 372), False, 'import logging\n'), ((469, 525), 'ringConnector.core.downloadDaysDingVideos', 'core.downloadDaysDingVideos', ([], {'dayToDownload': 'dayToDownload'}), '(dayToDownload=dayToDownload)\n', (496, 525), False, 'from ringConnector import core\n'), ((538, 573), 'logging.info', 'logging.info', (['"""will download today"""'], {}), "('will download today')\n", (550, 573), False, 'import logging\n'), ((584, 613), 'ringConnector.core.downloadDaysDingVideos', 'core.downloadDaysDingVideos', ([], {}), '()\n', (611, 613), False, 'from ringConnector import core\n'), ((100, 119), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (117, 119), False, 'import logging\n'), ((393, 451), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dayToDownloadParam', '"""%Y-%m-%d"""'], {}), "(dayToDownloadParam, '%Y-%m-%d')\n", (419, 451), False, 'import datetime\n')] |
import os
from setuptools import setup, find_packages
# Directory containing this setup.py.
# NOTE(review): only referenced by the commented-out long_description block.
base_dir = os.path.dirname(os.path.abspath(__file__))

setup(
    name='hcipy',
    version="0.0.1",
    description="A pure python library for Bluetooth LE that has minimal dependencies.",
    # Reading README.md was replaced by the inline text below:
    #long_description="\n\n".join([
    #    open(os.path.join(base_dir, "README.md"), "r").read(),
    #]),
    long_description="A pure Python module written using only the Python standard library for interacting with the Bluetooth HCI.",
    url='https://github.com/TheBubbleworks/python-hcipy',
    author='<NAME>',
    author_email='<EMAIL>',
    maintainer='<NAME>',
    maintainer_email='<EMAIL>',
    license='MIT',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=[],  # stdlib only, by design
    # Tests are not wired up yet:
    #tests_require=tests_require,
    #test_suite="setup.test_suite",
    platforms=['Raspberry Pi', 'Linux'],
    # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=['Development Status :: 2 - Pre-Alpha',
                 'Programming Language :: Python :: 3',
                 'Programming Language :: Python :: 2',
                 'Environment :: Console',
                 'Intended Audience :: Developers',
                 'Operating System :: POSIX',
                 'Topic :: Software Development :: Libraries :: Python Modules',
                 ],
) | [
"os.path.abspath",
"setuptools.find_packages"
] | [((81, 106), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (96, 106), False, 'import os\n'), ((682, 697), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (695, 697), False, 'from setuptools import setup, find_packages\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-15 19:21
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import hoover.contrib.twofactor.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Invitation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(default=hoover.contrib.twofactor.models.random_code, max_length=200)),
('generated', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"django.db.models.OneToOneField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((349, 406), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (380, 406), False, 'from django.db import migrations, models\n'), ((541, 634), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (557, 634), False, 'from django.db import migrations, models\n'), ((658, 747), 'django.db.models.CharField', 'models.CharField', ([], {'default': 'hoover.contrib.twofactor.models.random_code', 'max_length': '(200)'}), '(default=hoover.contrib.twofactor.models.random_code,\n max_length=200)\n', (674, 747), False, 'from django.db import migrations, models\n'), ((776, 815), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (796, 815), False, 'from django.db import migrations, models\n'), ((843, 942), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL)\n', (863, 942), False, 'from django.db import migrations, models\n')] |
'''
任务调度器
给你一个用字符数组 tasks 表示的 CPU 需要执行的任务列表。其中每个字母表示一种不同种类的任务。任务可以以任意顺序执行,并且每个任务都可以在 1 个单位时间内执行完。
在任何一个单位时间,CPU 可以完成一个任务,或者处于待命状态。
然而,两个 相同种类 的任务之间必须有长度为整数 n 的冷却时间,因此至少有连续 n 个单位时间内 CPU 在执行不同的任务,或者在待命状态。
你需要计算完成所有任务所需要的 最短时间 。
提示:
1 <= task.length <= 10^4
tasks[i] 是大写英文字母
n 的取值范围为 [0, 100]
'''
from typing import List
'''
思路:哈希+链表计数
任务执行窗口大小为n,每次从待办任务中拿出任务数最多的n个任务依次执行。
可以用哈希+链表实现每n个周期从待办任务中取出次数最多的n个任务。这里偷个懒用collections.counter
时间复杂度:O(m*n)
空间复杂度:O(m)
'''
class Solution:
def leastInterval(self, tasks: List[str], n: int) -> int:
if n == 0:
return len(tasks)
import collections
counter = collections.Counter(tasks)
empty = collections.Counter()
times = 0
while len(counter) > 0:
topTasks = counter.most_common(n + 1)
counter.subtract(map(lambda t: t[0], topTasks))
counter = counter - empty # 去掉为0的任务
if len(counter) > 0:
times += n + 1
else:
times += len(topTasks)
return times
s = Solution()
print(s.leastInterval(tasks=["A", "A", "A", "B", "B", "B"], n=2))
print(s.leastInterval(tasks=["A", "A", "A", "B", "B", "B"], n=0))
print(s.leastInterval(tasks=["A", "A", "A", "A", "A", "A", "B", "C", "D", "E", "F", "G"], n=2))
| [
"collections.Counter"
] | [((635, 661), 'collections.Counter', 'collections.Counter', (['tasks'], {}), '(tasks)\n', (654, 661), False, 'import collections\n'), ((678, 699), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (697, 699), False, 'import collections\n')] |
from chemeco.wrappers.database import sklearn_db
from chemeco.wrappers.database import cheml_db
from chemeco.wrappers.database import pandas_db
import inspect
def tshf():
"""
tshf stands for the combination of task, subtask, host, and function
:return: combination, dictionary of the aforementioned combinations
"""
# 7 tasks
tasks = ['Enter', 'Represent', 'Prepare', 'Model', 'Search', 'Mix', 'Visualize', 'Store']
extras = ['np', '__builtins__', '__doc__', '__file__', '__name__', '__package__', 'mask', 'Input', 'Output',
'Parameter', 'req', 'regression_types', 'cv_classes']
combination = {task: {} for task in tasks}
all_classes = [k[1] for k in inspect.getmembers(sklearn_db) if k[0][0:2]!='__']
all_classes += [k[1] for k in inspect.getmembers(cheml_db) if k[0][0:2]!='__' ]
all_classes += [k[1] for k in inspect.getmembers(pandas_db) if k[0][0:2]!='__' ]
for k in all_classes:
vk = vars(k)
if 'task' in vk and 'subtask' in vk:
task, subtask, host, function = [vk['task'], vk['subtask'], vk['host'], vk['function']]
if subtask not in combination[task]:
combination[task][subtask] = {host: [function]}
else:
if host not in combination[task][subtask]:
combination[task][subtask][host] = [function]
else:
combination[task][subtask][host].append(function)
return tasks, combination
| [
"inspect.getmembers"
] | [((703, 733), 'inspect.getmembers', 'inspect.getmembers', (['sklearn_db'], {}), '(sklearn_db)\n', (721, 733), False, 'import inspect\n'), ((788, 816), 'inspect.getmembers', 'inspect.getmembers', (['cheml_db'], {}), '(cheml_db)\n', (806, 816), False, 'import inspect\n'), ((872, 901), 'inspect.getmembers', 'inspect.getmembers', (['pandas_db'], {}), '(pandas_db)\n', (890, 901), False, 'import inspect\n')] |
import time
from PLS.QuadTree import QuadTree
from PLS.AlgorithmHelper import convertPopulationToFront
from copy import deepcopy
class PLS:
#Problem parameters and functions
def __init__(self, objectsWeights, objectsValues, W, populationGenerator, neighborhoodGenerator, updateFunction):
self.objectsWeights = objectsWeights
self.objectsValues = objectsValues
self.W = W
self.nbCriteria = objectsValues.shape[1]
self.populationGenerator = populationGenerator
self.neighborhoodGenerator = neighborhoodGenerator
self.updateFunction = updateFunction
def runQuad(self, verbose=0):
startTime = time.time()
initialPopulation = self.populationGenerator(self.objectsWeights, self.objectsValues, self.W)
initialPopQuad = QuadTree(self.nbCriteria)
initialPopQuad.bulkInsert(initialPopulation)
population = deepcopy(initialPopQuad)
efficaces = deepcopy(initialPopQuad)
Pa = QuadTree(self.nbCriteria)
it = 0
while population:
if verbose == 1:
print(f"Taille de nouvelle population au temps {it}: {len(population)}")
for solution in population:
for voisin in self.neighborhoodGenerator(solution, self.objectsWeights, self.objectsValues, self.W):
#If voisin is not dominated by solution
if any(solution[1] < voisin[1]):
if self.updateFunction(efficaces, voisin):
self.updateFunction(Pa, voisin)
population = Pa
Pa = QuadTree(self.nbCriteria)
it += 1
return efficaces, time.time() - startTime
def runList(self, verbose=0):
startTime = time.time()
initialPopulation = self.populationGenerator(self.objectsWeights, self.objectsValues, self.W)
population = initialPopulation.copy()
efficaces = initialPopulation.copy()
Pa = []
it = 0
while population:
if verbose == 1:
print(f"Taille de nouvelle population au temps {it}: {len(population)}")
for solution in population:
for voisin in self.neighborhoodGenerator(solution, self.objectsWeights, self.objectsValues, self.W):
#If voisin is not dominated by solution
if not all(solution[1] >= voisin[1]):
if self.updateFunction(efficaces, voisin):
self.updateFunction(Pa, voisin)
population = Pa
Pa = []
it += 1
return efficaces, time.time() - startTime
def runSelection(self, selector, selectorArgs=[], mode="OWA", verbose=0):
startTime = time.time()
prefs = []
initialPopulation = self.populationGenerator(self.objectsWeights, self.objectsValues, self.W)
tempoFront = convertPopulationToFront(initialPopulation, self.nbCriteria)
currentSolutionIndex, questionCount, prefs = selector(tempoFront, *selectorArgs, mode=mode, verbose=0)
currentSolution = initialPopulation[currentSolutionIndex]
it = 0
totalSelectorIt = questionCount
while True:
if verbose == 1:
pass
#print(f"Taille de nouvelle population au temps {it}: {len(population)}")
currentPopulation = [currentSolution] + self.neighborhoodGenerator(currentSolution, self.objectsWeights, self.objectsValues, self.W)
tempoFront = convertPopulationToFront(currentPopulation, self.nbCriteria)
currentSolutionIndex, questionCount, prefs = selector(tempoFront, *selectorArgs, mode=mode, prefs=prefs, verbose=verbose)
totalSelectorIt += questionCount
if verbose == 1:
print(f"Selected solution n°{currentSolutionIndex}: {currentPopulation[currentSolutionIndex][1]}")
#If currentSolutionIndex equal 0, parent is selected
if currentSolutionIndex == 0:
if verbose == 1:
print(f"Solution séléctionné en {it+1} itérations.")
return currentSolution, time.time() - startTime, totalSelectorIt
currentSolution = currentPopulation[currentSolutionIndex]
it += 1
| [
"copy.deepcopy",
"time.time",
"PLS.AlgorithmHelper.convertPopulationToFront",
"PLS.QuadTree.QuadTree"
] | [((622, 633), 'time.time', 'time.time', ([], {}), '()\n', (631, 633), False, 'import time\n'), ((752, 777), 'PLS.QuadTree.QuadTree', 'QuadTree', (['self.nbCriteria'], {}), '(self.nbCriteria)\n', (760, 777), False, 'from PLS.QuadTree import QuadTree\n'), ((843, 867), 'copy.deepcopy', 'deepcopy', (['initialPopQuad'], {}), '(initialPopQuad)\n', (851, 867), False, 'from copy import deepcopy\n'), ((882, 906), 'copy.deepcopy', 'deepcopy', (['initialPopQuad'], {}), '(initialPopQuad)\n', (890, 906), False, 'from copy import deepcopy\n'), ((915, 940), 'PLS.QuadTree.QuadTree', 'QuadTree', (['self.nbCriteria'], {}), '(self.nbCriteria)\n', (923, 940), False, 'from PLS.QuadTree import QuadTree\n'), ((1540, 1551), 'time.time', 'time.time', ([], {}), '()\n', (1549, 1551), False, 'import time\n'), ((2364, 2375), 'time.time', 'time.time', ([], {}), '()\n', (2373, 2375), False, 'import time\n'), ((2503, 2563), 'PLS.AlgorithmHelper.convertPopulationToFront', 'convertPopulationToFront', (['initialPopulation', 'self.nbCriteria'], {}), '(initialPopulation, self.nbCriteria)\n', (2527, 2563), False, 'from PLS.AlgorithmHelper import convertPopulationToFront\n'), ((1409, 1434), 'PLS.QuadTree.QuadTree', 'QuadTree', (['self.nbCriteria'], {}), '(self.nbCriteria)\n', (1417, 1434), False, 'from PLS.QuadTree import QuadTree\n'), ((3052, 3112), 'PLS.AlgorithmHelper.convertPopulationToFront', 'convertPopulationToFront', (['currentPopulation', 'self.nbCriteria'], {}), '(currentPopulation, self.nbCriteria)\n', (3076, 3112), False, 'from PLS.AlgorithmHelper import convertPopulationToFront\n'), ((1468, 1479), 'time.time', 'time.time', ([], {}), '()\n', (1477, 1479), False, 'import time\n'), ((2249, 2260), 'time.time', 'time.time', ([], {}), '()\n', (2258, 2260), False, 'import time\n'), ((3599, 3610), 'time.time', 'time.time', ([], {}), '()\n', (3608, 3610), False, 'import time\n')] |
"""Test the Discovergy config flow."""
from unittest.mock import AsyncMock, MagicMock, patch
from pydiscovergy.error import HTTPError, InvalidLogin
from pydiscovergy.models import AccessToken, ConsumerToken
from homeassistant import setup
from homeassistant.components.discovergy.config_flow import CannotConnect, InvalidAuth
from homeassistant.components.discovergy.const import (
CONF_ACCESS_TOKEN,
CONF_ACCESS_TOKEN_SECRET,
CONF_CONSUMER_KEY,
CONF_CONSUMER_SECRET,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_USER
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.common import MockConfigEntry
def get_discovergy_mock() -> MagicMock:
"""Return a MagicMock Discovergy instance for testing."""
discovergy_mock = MagicMock()
type(discovergy_mock).login = AsyncMock(
return_value=(
AccessToken("rq-test-token", "rq-test-token-secret"),
ConsumerToken("test-key", "test-secret"),
)
)
type(discovergy_mock).get_meters = AsyncMock(return_value=[])
return discovergy_mock
async def test_form(hass: HomeAssistant) -> None:
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with patch("pydiscovergy.Discovergy", return_value=get_discovergy_mock(),), patch(
"homeassistant.components.discovergy.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_EMAIL: "test<EMAIL>",
CONF_PASSWORD: "<PASSWORD>",
},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "<EMAIL>"
assert result2["data"] == {
CONF_EMAIL: "<EMAIL>",
CONF_PASSWORD: "<PASSWORD>",
CONF_ACCESS_TOKEN: "rq-test-token",
CONF_ACCESS_TOKEN_SECRET: "rq-test-token-secret",
CONF_CONSUMER_KEY: "test-key",
CONF_CONSUMER_SECRET: "test-secret",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch(
"pydiscovergy.Discovergy.login",
side_effect=InvalidLogin,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_EMAIL: "<EMAIL>",
CONF_PASSWORD: "<PASSWORD>",
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch("pydiscovergy.Discovergy.login", side_effect=HTTPError):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_EMAIL: "<EMAIL>",
CONF_PASSWORD: "<PASSWORD>",
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_exception(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch("pydiscovergy.Discovergy.login", side_effect=Exception):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_EMAIL: "<EMAIL>",
CONF_PASSWORD: "<PASSWORD>",
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_automatic_reauth_flow(
hass: HomeAssistant, mock_config_entry: MockConfigEntry
) -> None:
"""Test the automatic rauth flow."""
mock_config_entry.add_to_hass(hass)
with patch(
"pydiscovergy.Discovergy", return_value=get_discovergy_mock()
) as mock_discovergy, patch(
"homeassistant.components.discovergy.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": SOURCE_REAUTH,
"unique_id": mock_config_entry.unique_id,
"entry_id": mock_config_entry.entry_id,
},
data=mock_config_entry.data,
)
await hass.async_block_till_done()
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "reauth_successful"
assert mock_discovergy.call_count == 1
assert mock_setup_entry.call_count == 1
async def test_automatic_reauth_flow_missing_entry(hass: HomeAssistant) -> None:
"""Test the automatic rauth flow if it is missing the config entry."""
with patch("pydiscovergy.Discovergy", return_value=get_discovergy_mock()), patch(
"homeassistant.components.discovergy.async_setup_entry", return_value=True
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": SOURCE_REAUTH,
"unique_id": "abc123xyz",
"entry_id": "abc123",
},
data={},
)
await hass.async_block_till_done()
assert result.get("type") == RESULT_TYPE_FORM
async def test_automatic_reauth_flow_connection_error(
hass: HomeAssistant, mock_config_entry: MockConfigEntry
) -> None:
"""Test the automatic reauth flow if a connection error is raised."""
mock_config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.discovergy.config_flow.validate_input",
side_effect=CannotConnect,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": SOURCE_REAUTH,
"unique_id": mock_config_entry.unique_id,
"entry_id": mock_config_entry.entry_id,
},
data=mock_config_entry.data,
)
assert result.get("type") == RESULT_TYPE_FORM
assert result.get("step_id") == "reauth_confirm"
assert "flow_id" in result
async def test_automatic_reauth_flow_invalid_auth(
hass: HomeAssistant, mock_config_entry: MockConfigEntry
) -> None:
"""Test the automatic reauth flow if a invalid auth error is raised."""
mock_config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.discovergy.config_flow.validate_input",
side_effect=InvalidAuth,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": SOURCE_REAUTH,
"unique_id": mock_config_entry.unique_id,
"entry_id": mock_config_entry.entry_id,
},
data=mock_config_entry.data,
)
assert result.get("type") == RESULT_TYPE_FORM
assert result.get("step_id") == "reauth_confirm"
assert "flow_id" in result
async def test_manual_reauth_flow(
hass: HomeAssistant, mock_config_entry: MockConfigEntry
) -> None:
"""Test the manual reauth flow."""
mock_config_entry.add_to_hass(hass)
# check automatic re-auth flow with InvalidLogin exception raised
with patch("pydiscovergy.Discovergy.login", side_effec=InvalidLogin):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": SOURCE_REAUTH,
"unique_id": mock_config_entry.unique_id,
"entry_id": mock_config_entry.entry_id,
},
data=mock_config_entry.data,
)
await hass.async_block_till_done()
assert result.get("type") == RESULT_TYPE_FORM
assert result.get("step_id") == "reauth_confirm"
assert "flow_id" in result
# now check reauth_confirm flow with supplied password
with patch("pydiscovergy.Discovergy", return_value=get_discovergy_mock()), patch(
"homeassistant.components.discovergy.async_setup_entry", return_value=True
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PASSWORD: "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result2.get("type") == RESULT_TYPE_ABORT
assert result2.get("reason") == "reauth_successful"
| [
"homeassistant.setup.async_setup_component",
"pydiscovergy.models.ConsumerToken",
"unittest.mock.MagicMock",
"pydiscovergy.models.AccessToken",
"unittest.mock.AsyncMock",
"unittest.mock.patch"
] | [((960, 971), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (969, 971), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((1215, 1241), 'unittest.mock.AsyncMock', 'AsyncMock', ([], {'return_value': '[]'}), '(return_value=[])\n', (1224, 1241), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((1363, 1427), 'homeassistant.setup.async_setup_component', 'setup.async_setup_component', (['hass', '"""persistent_notification"""', '{}'], {}), "(hass, 'persistent_notification', {})\n", (1390, 1427), False, 'from homeassistant import setup\n'), ((1701, 1787), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.discovergy.async_setup_entry"""'], {'return_value': '(True)'}), "('homeassistant.components.discovergy.async_setup_entry', return_value\n =True)\n", (1706, 1787), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((2756, 2820), 'unittest.mock.patch', 'patch', (['"""pydiscovergy.Discovergy.login"""'], {'side_effect': 'InvalidLogin'}), "('pydiscovergy.Discovergy.login', side_effect=InvalidLogin)\n", (2761, 2820), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((3404, 3465), 'unittest.mock.patch', 'patch', (['"""pydiscovergy.Discovergy.login"""'], {'side_effect': 'HTTPError'}), "('pydiscovergy.Discovergy.login', side_effect=HTTPError)\n", (3409, 3465), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((4031, 4092), 'unittest.mock.patch', 'patch', (['"""pydiscovergy.Discovergy.login"""'], {'side_effect': 'Exception'}), "('pydiscovergy.Discovergy.login', side_effect=Exception)\n", (4036, 4092), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((4719, 4805), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.discovergy.async_setup_entry"""'], {'return_value': '(True)'}), "('homeassistant.components.discovergy.async_setup_entry', return_value\n =True)\n", (4724, 4805), False, 'from unittest.mock import AsyncMock, MagicMock, 
patch\n'), ((5634, 5720), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.discovergy.async_setup_entry"""'], {'return_value': '(True)'}), "('homeassistant.components.discovergy.async_setup_entry', return_value\n =True)\n", (5639, 5720), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((6346, 6448), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.discovergy.config_flow.validate_input"""'], {'side_effect': 'CannotConnect'}), "('homeassistant.components.discovergy.config_flow.validate_input',\n side_effect=CannotConnect)\n", (6351, 6448), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((7177, 7277), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.discovergy.config_flow.validate_input"""'], {'side_effect': 'InvalidAuth'}), "('homeassistant.components.discovergy.config_flow.validate_input',\n side_effect=InvalidAuth)\n", (7182, 7277), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((8023, 8086), 'unittest.mock.patch', 'patch', (['"""pydiscovergy.Discovergy.login"""'], {'side_effec': 'InvalidLogin'}), "('pydiscovergy.Discovergy.login', side_effec=InvalidLogin)\n", (8028, 8086), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((8728, 8814), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.discovergy.async_setup_entry"""'], {'return_value': '(True)'}), "('homeassistant.components.discovergy.async_setup_entry', return_value\n =True)\n", (8733, 8814), False, 'from unittest.mock import AsyncMock, MagicMock, patch\n'), ((1052, 1104), 'pydiscovergy.models.AccessToken', 'AccessToken', (['"""rq-test-token"""', '"""rq-test-token-secret"""'], {}), "('rq-test-token', 'rq-test-token-secret')\n", (1063, 1104), False, 'from pydiscovergy.models import AccessToken, ConsumerToken\n'), ((1118, 1158), 'pydiscovergy.models.ConsumerToken', 'ConsumerToken', (['"""test-key"""', '"""test-secret"""'], {}), "('test-key', 'test-secret')\n", (1131, 1158), False, 
'from pydiscovergy.models import AccessToken, ConsumerToken\n')] |
import logging
import click
from kfp import dsl
from typing import List, Dict, Callable
import kfp.dsl as dsl
from hypermodel.hml.hml_pipeline import HmlPipeline
from hypermodel.hml.hml_container_op import HmlContainerOp
from hypermodel.platform.abstract.services import PlatformServicesBase
@click.group(name="pipelines")
@click.pass_context
def cli_pipeline_group(context):
pass
class HmlPipelineApp:
def __init__(
self,
name: str,
services: PlatformServicesBase,
cli: click.Group,
image_url: str,
package_entrypoint: str,
envs: Dict[str, str]
):
if name is None or name == "":
raise(TypeError("Parameter: `name` must be supplied"))
if services is None:
raise(TypeError("Parameter: `services` must be supplied"))
if cli is None:
raise(TypeError("Parameter: `cli` must be supplied"))
self.name = name
self.services = services
self.cli_root = cli
self.cli_root.add_command(cli_pipeline_group)
self.envs = envs
self.image_url = image_url
self.package_entrypoint = package_entrypoint
self.pipelines: Dict[str, HmlPipeline] = dict()
self.deploy_callbacks: List[Callable[[HmlContainerOp], HmlContainerOp]] = []
def __getitem__(self, key: str) -> HmlPipeline:
"""
Get a reference to a `HmlPipeline` added to this pipeline
via a call to `self.pipelines`
"""
return self.pipelines[key]
def register_pipeline(self, pipeline_func, cron: str, experiment: str):
"""
Register a Kubeflow Pipeline (e.g. a function decorated with @hml.pipeline)
Args:
pipeline_func (Callable): The function defining the pipline
cron (str): A cron expression for the default job executing this pipelines
experiment (str): The kubeflow experiment to deploy the job to
Returns:
Nonw
"""
pipe = HmlPipeline(
cli=cli_pipeline_group,
pipeline_func=pipeline_func,
services=self.services,
image_url=self.image_url,
package_entrypoint=self.package_entrypoint,
op_builders=self.deploy_callbacks,
envs=self.envs
)
pipe.with_cron(cron)
pipe.with_experiment(experiment)
self.pipelines[pipe.name] = pipe
return pipe
def initialize(self):
for k in self.pipelines:
pipe = self.pipelines[k]
pipe._build_dag()
def on_deploy(self, func: Callable[[HmlContainerOp], HmlContainerOp]):
"""
Registers a function to be called for each ContainerOp defined in the Pipeline
to enable us to configure the Operations within the container with secrets,
environment variables and whatever else may be required.
Args:
func (Callable): The function (accepting a HmlContainerOp as its only parameter)
which configure the supplied HmlContainerOp
"""
self.deploy_callbacks.append(func)
return self
| [
"click.group",
"hypermodel.hml.hml_pipeline.HmlPipeline"
] | [((297, 326), 'click.group', 'click.group', ([], {'name': '"""pipelines"""'}), "(name='pipelines')\n", (308, 326), False, 'import click\n'), ((2020, 2239), 'hypermodel.hml.hml_pipeline.HmlPipeline', 'HmlPipeline', ([], {'cli': 'cli_pipeline_group', 'pipeline_func': 'pipeline_func', 'services': 'self.services', 'image_url': 'self.image_url', 'package_entrypoint': 'self.package_entrypoint', 'op_builders': 'self.deploy_callbacks', 'envs': 'self.envs'}), '(cli=cli_pipeline_group, pipeline_func=pipeline_func, services=\n self.services, image_url=self.image_url, package_entrypoint=self.\n package_entrypoint, op_builders=self.deploy_callbacks, envs=self.envs)\n', (2031, 2239), False, 'from hypermodel.hml.hml_pipeline import HmlPipeline\n')] |
#encoding=utf8
## 参考https://blog.csdn.net/dengxing1234/article/details/73739836
import xgboost as xgb
from sklearn.datasets import load_svmlight_file
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.externals import joblib
import numpy as np
from scipy.sparse import hstack
from sklearn.preprocessing.data import OneHotEncoder
def xgboost_lr_train(libsvmFileNameInitial):
# load样本数据
X_all, y_all = load_svmlight_file(libsvmFileNameInitial)
# 训练/测试数据分割
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size = 0.3, random_state = 42)
# 定义xgb模型
xgboost = xgb.XGBClassifier(nthread=4, learning_rate=0.08,
n_estimators=50, max_depth=5, gamma=0, subsample=0.9, colsample_bytree=0.5)
# 训练xgb学习
xgboost.fit(X_train, y_train)
# 预测xgb及AUC评测
y_pred_test = xgboost.predict_proba(X_test)[:, 1]
xgb_test_auc = roc_auc_score(y_test, y_pred_test)
print('xgboost test auc: %.5f' % xgb_test_auc)
# xgboost编码原有特征
X_train_leaves = xgboost.apply(X_train)
X_test_leaves = xgboost.apply(X_test)
# 合并编码后的训练数据和测试数据
All_leaves = np.concatenate((X_train_leaves, X_test_leaves), axis=0)
All_leaves = All_leaves.astype(np.int32)
# 对所有特征进行ont-hot编码
xgbenc = OneHotEncoder()
X_trans = xgbenc.fit_transform(All_leaves)
(train_rows, cols) = X_train_leaves.shape
# 定义LR模型
lr = LogisticRegression()
# lr对xgboost特征编码后的样本模型训练
lr.fit(X_trans[:train_rows, :], y_train)
# 预测及AUC评测
y_pred_xgblr1 = lr.predict_proba(X_trans[train_rows:, :])[:, 1]
xgb_lr_auc1 = roc_auc_score(y_test, y_pred_xgblr1)
print('基于Xgb特征编码后的LR AUC: %.5f' % xgb_lr_auc1)
# 定义LR模型
lr = LogisticRegression(n_jobs=-1)
# 组合特征
X_train_ext = hstack([X_trans[:train_rows, :], X_train])
X_test_ext = hstack([X_trans[train_rows:, :], X_test])
# lr对组合特征的样本模型训练
lr.fit(X_train_ext, y_train)
# 预测及AUC评测
y_pred_xgblr2 = lr.predict_proba(X_test_ext)[:, 1]
xgb_lr_auc2 = roc_auc_score(y_test, y_pred_xgblr2)
print('基于组合特征的LR AUC: %.5f' % xgb_lr_auc2)
if __name__ == '__main__':
xgboost_lr_train("data/sample_libsvm_data.txt")
| [
"sklearn.datasets.load_svmlight_file",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.roc_auc_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.data.OneHotEncoder",
"scipy.sparse.hstack",
"numpy.concatenate",
"xgboost.XGBClassifier"
] | [((536, 577), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['libsvmFileNameInitial'], {}), '(libsvmFileNameInitial)\n', (554, 577), False, 'from sklearn.datasets import load_svmlight_file\n'), ((634, 696), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_all', 'y_all'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(X_all, y_all, test_size=0.3, random_state=42)\n', (650, 696), False, 'from sklearn.model_selection import train_test_split\n'), ((730, 859), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'nthread': '(4)', 'learning_rate': '(0.08)', 'n_estimators': '(50)', 'max_depth': '(5)', 'gamma': '(0)', 'subsample': '(0.9)', 'colsample_bytree': '(0.5)'}), '(nthread=4, learning_rate=0.08, n_estimators=50, max_depth\n =5, gamma=0, subsample=0.9, colsample_bytree=0.5)\n', (747, 859), True, 'import xgboost as xgb\n'), ((1023, 1057), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (1036, 1057), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n'), ((1257, 1312), 'numpy.concatenate', 'np.concatenate', (['(X_train_leaves, X_test_leaves)'], {'axis': '(0)'}), '((X_train_leaves, X_test_leaves), axis=0)\n', (1271, 1312), True, 'import numpy as np\n'), ((1395, 1410), 'sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1408, 1410), False, 'from sklearn.preprocessing.data import OneHotEncoder\n'), ((1528, 1548), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1546, 1548), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1724, 1760), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred_xgblr1'], {}), '(y_test, y_pred_xgblr1)\n', (1737, 1760), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n'), ((1835, 1864), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1853, 1864), False, 
'from sklearn.linear_model import LogisticRegression\n'), ((1894, 1936), 'scipy.sparse.hstack', 'hstack', (['[X_trans[:train_rows, :], X_train]'], {}), '([X_trans[:train_rows, :], X_train])\n', (1900, 1936), False, 'from scipy.sparse import hstack\n'), ((1954, 1995), 'scipy.sparse.hstack', 'hstack', (['[X_trans[train_rows:, :], X_test]'], {}), '([X_trans[train_rows:, :], X_test])\n', (1960, 1995), False, 'from scipy.sparse import hstack\n'), ((2140, 2176), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred_xgblr2'], {}), '(y_test, y_pred_xgblr2)\n', (2153, 2176), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n')] |
# read README.md file on my GitHub. It walks you through the instalation of necessary
# libraries, helps you solve common errors and provides useful references ;)
# https://github.com/scraptechguy/SpeechCheck
# import library to report time in debug printing
import datetime
# import elements from libraries for Microsoft Text Analytics
from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential
# import elements from libraries for Microsoft Speech Services
import azure.cognitiveservices.speech as speechsdk
# import elements from libraries for Natural Language Tool Kit
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
# import matplot for data visualization
import matplotlib.pyplot as plt
# import wordcloud for common words visualization
from wordcloud import WordCloud, STOPWORDS
# and finally import kivy for gui
from kivy.app import App
from kivy.core import text
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivy.clock import Clock
from kivy.uix.scrollview import ScrollView
# Azure credentials for the Text Analytics and Speech services.
# NOTE(review): placeholder values — fill in real keys/endpoints before running.
# SECURITY: never commit real keys to source control; prefer environment variables.
key1 = "<key>"
endpoint1 = "<endpoint>" # without the trailing slash ;)
speech_key, service_region = "<key>", "<region>"
# configure the Speech SDK for English (US) recognition
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
speech_config.speech_recognition_language="en-US"
# module-level recognizer shared by the whole app (event handlers are attached in w1.__init__)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
# authenticate client on Azure
def authenticate_client():
    """Build a TextAnalyticsClient authenticated with the module-level key/endpoint."""
    ta_credential = AzureKeyCredential(key1)
    text_analytics_client = TextAnalyticsClient(
            endpoint=endpoint1,
            credential=ta_credential)
    return text_analytics_client

# single shared Text Analytics client used by all analysis calls below
client = authenticate_client()
# analyze document sentiment
def sentiment_analysis_example(client):
    """Run Azure sentiment analysis over the global transcript `result`.

    Stores human-readable summaries in the module globals `doc_sentiment`
    and `overall_scores` (read later by w1.process for display).
    """
    global doc_sentiment
    global overall_scores
    documents = [result]
    # analyze_sentiment works on a batch; we send a single document, so take [0]
    response = client.analyze_sentiment(documents = documents)[0]
    doc_sentiment = "Document Sentiment: {}".format(response.sentiment)
    overall_scores = "Overall scores: positive={0:.2f}; neutral={1:.2f}; negative={2:.2f} \n".format(
        response.confidence_scores.positive,
        response.confidence_scores.neutral,
        response.confidence_scores.negative,
    )
# display key phrases of the transcript
def key_phrase_extraction_example(client):
    """Run Azure key-phrase extraction over the global transcript `result`.

    Stores the extracted phrases (one per line, each prefixed with a space to
    match the original display formatting) in the module global `keyphr`,
    which w1.process reads for display.

    Returns the phrase string on success, None on service error or exception.
    """
    global keyphr
    try:
        documents = [result]
        # extract_key_phrases works on a batch; we send a single document
        response = client.extract_key_phrases(documents = documents)[0]
        if not response.is_error:
            # BUG FIX: the original loop overwrote `keyphr` on every
            # iteration, so only the *last* key phrase survived (and an empty
            # phrase list raised NameError). Join all phrases instead.
            keyphr = "\n".join(" {}".format(phrase) for phrase in response.key_phrases)
            return keyphr
        else:
            print(response.id, response.error)
    except Exception as err:
        print("Encountered exception. {}".format(err))
# load file with kivy design language for gui design (not in a separate file, it's easier)
Builder.load_string("""
<w1>
BoxLayout:
orientation: "vertical"
size: root.width, root.height
spacing: 50
padding: 50
ScrollView:
do_scroll_x: False
do_scroll_y: True
size_hint: (7, 7)
Label:
size_hint_y: None
height: self.texture_size[1]
text_size: self.width, None
text: root.term_text
pos_hint: {"center_x": 0.5}
Button:
id: button1
text: "Listen to me now!"
background_normal: 'none'
background_color: (0, 64/255, 77/255)
on_release: root.listen()
Button:
id: button2
text: "Stop listening to me"
background_normal: 'none'
background_color: (0, 64/255, 77/255)
on_press: root.stop()
BoxLayout:
orientation: "horizontal"
Button:
text: "Clear"
background_normal: 'none'
background_color: (0, 77/255, 64/255)
pos_hint: {"center_x": 0.25}
size_hint: (0.75, 0.75)
on_press: root.clear()
Button:
text: "Spit out charts"
background_normal: 'none'
background_color: (0, 77/255, 64/255)
size_hint: (0.75, 0.75)
pos_hint: {"center_x": 0.25}
on_press: root.display_charts()
Button:
text: "Redo clear"
background_normal: 'none'
background_color: (0, 77/255, 64/255)
pos_hint: {"center_x": 0.75}
size_hint: (0.75, 0.75)
on_press: root.redo_clear()
""")
# `result` accumulates the recognized transcript (appended to by collectResult);
# `done` flips to True when the recognition session stops (see stop_cb), which
# tells w1.check() to process the transcript on its next scheduled tick.
result = " "
done = False
# stop_cb() cuts off the Azure speech recognition session and sets the
# module-level `done` flag so that w1.check() processes the recognized text.
def stop_cb(evt):
    """Session-stopped/canceled callback: stop recognition and flag completion."""
    print('CLOSING on {}'.format(evt))
    speech_recognizer.stop_continuous_recognition()
    global done
    done = True
# collectResult() appends each recognized utterance to the global transcript
def collectResult(evt):
    """`recognized` event callback: append the new utterance to `result`."""
    global result
    result += " {}".format(evt.result.text)
def prn(text):
    """Debug helper: print *text* prefixed with the current wall-clock time."""
    message = "at {}: {} ".format(datetime.datetime.now(), text)
    print(message)
class w1(Widget):
    """Root widget: drives the listen/stop/process cycle and shows results."""
    # term_text is what is displayed in the top (scrollable) label
    term_text = StringProperty()
    def __init__(self, **kwargs):
        super(w1, self).__init__(**kwargs)
        self.term_text = "terminal"
        # word-frequency distribution, filled in by process(); None until then
        self.fdist = None
        # The wiring below (down to the three hashes) must run only ONCE:
        # running it again would create duplicate event subscriptions and the
        # global `result` would collect doubled/tripled utterances.
        # attach handlers to the module-level Azure speech recognizer
        speech_recognizer.recognized.connect(lambda evt: collectResult(evt))
        speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))
        speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
        speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))
        speech_recognizer.session_stopped.connect(stop_cb)
        speech_recognizer.canceled.connect(stop_cb)
        ###
    # bound to the "Listen to me now!" button: reset state, start recognition,
    # and kick off the check() polling loop
    def listen(self):
        """Start a fresh continuous-recognition session."""
        prn("listen() called")
        global result
        global done
        done = False
        result = " "
        speech_recognizer.start_continuous_recognition()
        self.term_text = "Listening..."
        self.check(0)
    # polls the module-level `done` flag: once stop_cb() has set it, the
    # recognized text is processed; otherwise re-schedule ourselves in 1s
    def check(self, dt): # dt stands for delta time and is not crucial
        """Poll until recognition has stopped, then process the transcript."""
        global done
        prn("check() called")
        if done:
            prn("check() - done = True")
            self.process()
            done = False
        else:
            prn("check scheduled")
            Clock.schedule_once(self.check, 1)
    # bound to the "Stop listening to me" button: triggers stop_cb(), which
    # stops recognition and lets check() run process()
    def stop(self):
        """Stop the recognition session."""
        stop_cb(None)
    # runs the NLP pipeline over the accumulated transcript and shows results
    def process(self):
        """Tokenize the transcript, run Azure + NLTK analysis, display it."""
        prn("process() called")
        # split up text to separate words
        tokenized_word = word_tokenize(result)
        # execute Azure functions (they fill the module globals read below)
        sentiment_analysis_example(client)
        key_phrase_extraction_example(client)
        # execute NLTK word-frequency analysis
        self.fdist = FreqDist(tokenized_word)
        moscom = self.fdist.most_common(2) # top-2 most common words (tweak the number as desired)
        # display everything in the top label
        self.term_text = " {} \n \n {} \n {} \n {} \n {} ".format(
            result,
            doc_sentiment,
            overall_scores,
            keyphr,
            moscom,
        )
    # bound to "Spit out charts": frequency plot + word cloud of the transcript
    def display_charts(self):
        """Show the word-frequency plot and a word cloud (no-op before process)."""
        # NOTE(review): `== None` should be `is None` (PEP 8); kept as-is here
        if self.fdist == None:
            return
        # execute matplot (data visualization)
        self.fdist.plot(30,cumulative=False)
        # Create and generate a word cloud image:
        wordcloud = WordCloud().generate(result)
        plt.imshow(wordcloud, interpolation='bilinear')
        plt.axis("off")
        wordcloud.generate(result)
        plt.show()
    # stash the current label text so it can be restored, then reset the label
    def clear(self):
        """Clear the display, remembering the old text for redo_clear()."""
        # NOTE(review): `global redo` looks vestigial — the value is actually
        # stored on the instance as self.redo, not in a module global.
        global redo
        self.redo = self.term_text
        self.term_text = "terminal"
    # undo an accidental clear() by restoring the stashed label text
    def redo_clear(self):
        """Restore the label text saved by the last clear()."""
        self.term_text = self.redo
class DaApp(App):
    """Kivy application wrapper; the root widget is a single w1 instance."""
    def build(self):
        return w1()
# standard script entry point: start the Kivy GUI event loop
if __name__ == '__main__':
    DaApp().run()
| [
"matplotlib.pyplot.imshow",
"nltk.probability.FreqDist",
"kivy.lang.Builder.load_string",
"azure.cognitiveservices.speech.SpeechRecognizer",
"azure.cognitiveservices.speech.SpeechConfig",
"nltk.tokenize.word_tokenize",
"wordcloud.WordCloud",
"datetime.datetime.now",
"azure.core.credentials.AzureKeyC... | [((1675, 1745), 'azure.cognitiveservices.speech.SpeechConfig', 'speechsdk.SpeechConfig', ([], {'subscription': 'speech_key', 'region': 'service_region'}), '(subscription=speech_key, region=service_region)\n', (1697, 1745), True, 'import azure.cognitiveservices.speech as speechsdk\n'), ((1816, 1871), 'azure.cognitiveservices.speech.SpeechRecognizer', 'speechsdk.SpeechRecognizer', ([], {'speech_config': 'speech_config'}), '(speech_config=speech_config)\n', (1842, 1871), True, 'import azure.cognitiveservices.speech as speechsdk\n'), ((3322, 5179), 'kivy.lang.Builder.load_string', 'Builder.load_string', (['""" \n<w1>\n BoxLayout:\n \n orientation: "vertical"\n size: root.width, root.height\n spacing: 50\n padding: 50\n\n ScrollView:\n\n do_scroll_x: False\n do_scroll_y: True\n size_hint: (7, 7)\n\n Label:\n size_hint_y: None\n height: self.texture_size[1]\n text_size: self.width, None\n text: root.term_text\n pos_hint: {"center_x": 0.5}\n\n\n Button:\n\n id: button1\n text: "Listen to me now!"\n background_normal: \'none\'\n background_color: (0, 64/255, 77/255)\n on_release: root.listen()\n\n\n Button:\n\n id: button2\n text: "Stop listening to me"\n background_normal: \'none\'\n background_color: (0, 64/255, 77/255)\n on_press: root.stop()\n\n\n BoxLayout:\n\n orientation: "horizontal"\n\n\n Button:\n\n text: "Clear"\n background_normal: \'none\'\n background_color: (0, 77/255, 64/255)\n pos_hint: {"center_x": 0.25}\n size_hint: (0.75, 0.75)\n on_press: root.clear()\n\n\n Button:\n\n text: "Spit out charts"\n background_normal: \'none\'\n background_color: (0, 77/255, 64/255)\n size_hint: (0.75, 0.75)\n pos_hint: {"center_x": 0.25}\n on_press: root.display_charts()\n\n\n Button:\n \n text: "Redo clear"\n background_normal: \'none\'\n background_color: (0, 77/255, 64/255)\n pos_hint: {"center_x": 0.75}\n size_hint: (0.75, 0.75)\n on_press: root.redo_clear()\n \n"""'], {}), '(\n """ \n<w1>\n BoxLayout:\n \n orientation: 
"vertical"\n size: root.width, root.height\n spacing: 50\n padding: 50\n\n ScrollView:\n\n do_scroll_x: False\n do_scroll_y: True\n size_hint: (7, 7)\n\n Label:\n size_hint_y: None\n height: self.texture_size[1]\n text_size: self.width, None\n text: root.term_text\n pos_hint: {"center_x": 0.5}\n\n\n Button:\n\n id: button1\n text: "Listen to me now!"\n background_normal: \'none\'\n background_color: (0, 64/255, 77/255)\n on_release: root.listen()\n\n\n Button:\n\n id: button2\n text: "Stop listening to me"\n background_normal: \'none\'\n background_color: (0, 64/255, 77/255)\n on_press: root.stop()\n\n\n BoxLayout:\n\n orientation: "horizontal"\n\n\n Button:\n\n text: "Clear"\n background_normal: \'none\'\n background_color: (0, 77/255, 64/255)\n pos_hint: {"center_x": 0.25}\n size_hint: (0.75, 0.75)\n on_press: root.clear()\n\n\n Button:\n\n text: "Spit out charts"\n background_normal: \'none\'\n background_color: (0, 77/255, 64/255)\n size_hint: (0.75, 0.75)\n pos_hint: {"center_x": 0.25}\n on_press: root.display_charts()\n\n\n Button:\n \n text: "Redo clear"\n background_normal: \'none\'\n background_color: (0, 77/255, 64/255)\n pos_hint: {"center_x": 0.75}\n size_hint: (0.75, 0.75)\n on_press: root.redo_clear()\n \n"""\n )\n', (3341, 5179), False, 'from kivy.lang import Builder\n'), ((1954, 1978), 'azure.core.credentials.AzureKeyCredential', 'AzureKeyCredential', (['key1'], {}), '(key1)\n', (1972, 1978), False, 'from azure.core.credentials import AzureKeyCredential\n'), ((2007, 2072), 'azure.ai.textanalytics.TextAnalyticsClient', 'TextAnalyticsClient', ([], {'endpoint': 'endpoint1', 'credential': 'ta_credential'}), '(endpoint=endpoint1, credential=ta_credential)\n', (2026, 2072), False, 'from azure.ai.textanalytics import TextAnalyticsClient\n'), ((5854, 5877), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5875, 5877), False, 'import datetime\n'), ((6013, 6029), 'kivy.properties.StringProperty', 'StringProperty', ([], {}), '()\n', (6027, 
6029), False, 'from kivy.properties import StringProperty\n'), ((8630, 8651), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['result'], {}), '(result)\n', (8643, 8651), False, 'from nltk.tokenize import word_tokenize\n'), ((8828, 8852), 'nltk.probability.FreqDist', 'FreqDist', (['tokenized_word'], {}), '(tokenized_word)\n', (8836, 8852), False, 'from nltk.probability import FreqDist\n'), ((9583, 9630), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (9593, 9630), True, 'import matplotlib.pyplot as plt\n'), ((9639, 9654), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9647, 9654), True, 'import matplotlib.pyplot as plt\n'), ((9699, 9709), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9707, 9709), True, 'import matplotlib.pyplot as plt\n'), ((8106, 8140), 'kivy.clock.Clock.schedule_once', 'Clock.schedule_once', (['self.check', '(1)'], {}), '(self.check, 1)\n', (8125, 8140), False, 'from kivy.clock import Clock\n'), ((9546, 9557), 'wordcloud.WordCloud', 'WordCloud', ([], {}), '()\n', (9555, 9557), False, 'from wordcloud import WordCloud, STOPWORDS\n')] |
from bento_meta.objects import Node
from bento_meta_shim.models.mdbproperty import MDBproperty
class MDBnode():
    """Adapter that flattens a bento-meta Node into a plain MDB node object.

    Copies the interesting attributes (kind/label, handle, model, nanoid) off
    the wrapped node and converts its properties into MDBproperty wrappers.
    """

    # the wrapped bento-meta Node; kept private, retrievable via old()
    __node = None

    def __init__(self, node):
        self.__node = node
        self.kind = node.mapspec_['label']
        self.name = node.handle
        self.handle = node.handle
        self.model = node.model
        self.nanoid = node.nanoid
        self.props = self.__convert_props()

    def old(self):
        """Return the original wrapped node."""
        return self.__node

    def __convert_props(self):
        # wrap every underlying property, preserving its (tuple) key
        return [
            MDBproperty(property=self.__node.props[prop_key], key=prop_key)
            for prop_key in self.__node.props
        ]

    def __str__(self):
        template = 'a {}: {} called {}'
        return template.format(self.kind, self.nanoid, self.name)

    def __repr__(self):
        template = '{}:{}:{}'
        return template.format(self.kind, self.nanoid, self.name)
return '{}:{}:{}'.format(self.kind, self.nanoid, self.name) | [
"bento_meta_shim.models.mdbproperty.MDBproperty"
] | [((666, 708), 'bento_meta_shim.models.mdbproperty.MDBproperty', 'MDBproperty', ([], {'property': '_prop', 'key': 'tuple_key'}), '(property=_prop, key=tuple_key)\n', (677, 708), False, 'from bento_meta_shim.models.mdbproperty import MDBproperty\n')] |
import os
import requests
from urllib.error import URLError
from urllib.parse import urlparse
from urllib.request import urlopen
from luigi import Target, LocalTarget
from hashlib import sha1
from tasks.util import (query_cartodb, underscore_slugify, OBSERVATORY_PREFIX, OBSERVATORY_SCHEMA)
from tasks.meta import (OBSColumn, OBSTable, metadata, Geometry, Point,
Linestring, OBSColumnTable, OBSTag, current_session)
from sqlalchemy import Table, types, Column
from lib.logger import get_logger
# module-level logger shared by all targets in this module
LOGGER = get_logger(__name__)
class PostgresTarget(Target):
    '''
    Luigi Target backed by a Postgres table.

    By default the target "exists" only when the table exists AND holds at
    least one row matching ``where``; pass ``non_empty=False`` to accept an
    empty table. Uses the command-line specified login via current_session().
    '''

    def __init__(self, schema, tablename, non_empty=True, where="1 = 1"):
        # where: SQL predicate used when probing for rows (default matches all)
        self._schema = schema
        self._tablename = tablename
        self._non_empty = non_empty
        self._where = where

    @property
    def table(self):
        # fully-qualified, quoted table reference for SQL interpolation
        return '"{schema}".{tablename}'.format(schema=self._schema,
                                               tablename=self._tablename)

    @property
    def tablename(self):
        return self._tablename

    @property
    def schema(self):
        return self._schema

    @property
    def qualified_tablename(self):
        return '"{}".{}'.format(self.schema, self.tablename)

    def _existenceness(self):
        '''
        Returns 0 if the table does not exist, 1 if it exists but has no
        rows (is empty), and 2 if it exists and has one or more rows.
        '''
        session = current_session()
        # probe the catalog first so the row-probe below can't fail on a
        # missing relation
        sql = '''
            SELECT COUNT(*) FROM information_schema.tables
            WHERE table_schema ILIKE '{schema}'
              AND table_name ILIKE '{tablename}'
              '''.format(
                schema=self._schema,
                tablename=self._tablename)
        resp = session.execute(sql)
        if int(resp.fetchone()[0]) == 0:
            return 0
        # table exists — probe for at least one row matching the predicate
        resp = session.execute(
            'SELECT row_number() over () FROM "{schema}".{tablename} WHERE {where} LIMIT 1'.format(
                schema=self._schema, tablename=self._tablename,
                where=self._where))
        if resp.fetchone() is None:
            return 1
        else:
            return 2

    def empty(self):
        '''
        Returns True if the table exists but has no rows in it.
        '''
        return self._existenceness() == 1

    def exists(self):
        '''
        Returns True if the table exists and has at least one row in it
        (or merely exists, when constructed with non_empty=False).
        '''
        if self._non_empty:
            return self._existenceness() == 2
        else:
            return self._existenceness() >= 1

    def exists_or_empty(self):
        '''
        Returns True if the table exists, even if it is empty.
        '''
        return self._existenceness() >= 1
class CartoDBTarget(Target):
    '''
    Target which is a CartoDB table.

    :param tablename: name of the table on CARTO
    :param carto_url: base URL of the CARTO instance; when None, the
                      module-wide default used by ``query_cartodb`` applies
    :param api_key: CARTO API key; when None, the default applies
    '''

    def __init__(self, tablename, carto_url=None, api_key=None):
        self.tablename = tablename
        self.carto_url = carto_url
        self.api_key = api_key

    def __str__(self):
        return self.tablename

    def exists(self):
        """True when the remote table exists and holds at least one row."""
        resp = query_cartodb(
            'SELECT row_number() over () FROM "{tablename}" LIMIT 1'.format(
                tablename=self.tablename),
            api_key=self.api_key,
            carto_url=self.carto_url)
        if resp.status_code != 200:
            return False
        return resp.json()['total_rows'] > 0

    def remove(self, carto_url=None, api_key=None):
        """Delete the remote dataset (visualization, then the raw table).

        BUG FIX: the explicit arguments now fall back to the values given to
        ``__init__`` — the original ignored ``self.carto_url``/``self.api_key``,
        so a bare ``remove()`` built request URLs against ``None``.
        """
        carto_url = carto_url or self.carto_url
        api_key = api_key or self.api_key or os.environ['CARTODB_API_KEY']
        try:
            while True:
                # look up the visualization id backing the table; once the
                # table is gone the JSON lookup raises ValueError, which
                # terminates the loop
                resp = requests.get('{url}/api/v1/tables/{tablename}?api_key={api_key}'.format(
                    url=carto_url,
                    tablename=self.tablename,
                    api_key=api_key
                ))
                viz_id = resp.json()['id']
                # delete dataset by id DELETE
                # https://observatory.cartodb.com/api/v1/viz/ed483a0b-7842-4610-9f6c-8591273b8e5c
                try:
                    requests.delete('{url}/api/v1/viz/{viz_id}?api_key={api_key}'.format(
                        url=carto_url,
                        viz_id=viz_id,
                        api_key=api_key
                    ), timeout=1)
                except requests.Timeout:
                    # the DELETE may hang server-side; fire-and-forget, the
                    # next loop iteration re-checks whether it succeeded
                    pass
        except ValueError:
            pass
        # belt-and-braces: drop the raw table with the same credentials used
        # by exists() (the original dropped with module defaults only)
        query_cartodb('DROP TABLE IF EXISTS {tablename}'.format(tablename=self.tablename),
                      carto_url=carto_url, api_key=api_key)
        assert not self.exists()
class ColumnTarget(Target):
    '''
    Target wrapping an OBSColumn metadata record; "exists" means the DB
    already holds this column id at the same ETL version.
    '''

    def __init__(self, column, task):
        self._id = column.id
        self._task = task
        self._column = column

    def get(self, session):
        '''
        Return a copy of the underlying OBSColumn in the specified session.
        '''
        with session.no_autoflush:
            return session.query(OBSColumn).get(self._id)

    def update_or_create(self):
        # merge writes the in-memory column into the session (insert or update)
        self._column = current_session().merge(self._column)

    def exists(self):
        """True when the DB column version matches the in-code (ETL) version.

        Raises if the DB holds a NEWER version than the code, which would
        mean running an outdated task against fresher metadata.
        """
        existing = self.get(current_session())
        new_version = float(self._column.version or 0.0)
        if existing:
            existing_version = float(existing.version or 0.0)
            # detach so later session operations don't flush this copy
            current_session().expunge(existing)
        else:
            existing_version = 0.0
        if existing and existing_version == new_version:
            return True
        elif existing and existing_version > new_version:
            raise Exception('Metadata version mismatch: cannot run task {task} '
                            '(id "{id}") '
                            'with ETL version ({etl}) older than what is in '
                            'DB ({db})'.format(task=self._task.task_id,
                                               id=self._id,
                                               etl=new_version,
                                               db=existing_version))
        return False
class TagTarget(Target):
    '''
    Target wrapping an OBSTag metadata record; "exists" means the DB already
    holds this tag id at the same ETL version.
    '''

    def __init__(self, tag, task):
        self._id = tag.id
        self._tag = tag
        self._task = task

    # class-level cache shared by ALL TagTarget instances: tag id -> OBSTag,
    # avoids re-querying the same tag for every task that references it
    _tag_cache = {}

    def get(self, session):
        '''
        Return a copy of the underlying OBSTag in the specified session.
        '''
        if not self._tag_cache.get(self._id, None):
            with session.no_autoflush:
                self._tag_cache[self._id] = session.query(OBSTag).get(self._id)
        return self._tag_cache[self._id]

    def update_or_create(self):
        # merge writes the in-memory tag into the session (insert or update)
        with current_session().no_autoflush:
            self._tag = current_session().merge(self._tag)

    def exists(self):
        """True when the DB tag version matches the in-code (ETL) version.

        Raises if the DB holds a NEWER version than the code.
        """
        session = current_session()
        existing = self.get(session)
        new_version = self._tag.version or 0.0
        if existing:
            if existing in session:
                # detach so later session operations don't flush this copy
                session.expunge(existing)
            existing_version = existing.version or 0.0
            if float(existing_version) == float(new_version):
                return True
            if existing_version > new_version:
                raise Exception('Metadata version mismatch: cannot run task {task} '
                                '(id "{id}") '
                                'with ETL version ({etl}) older than what is in '
                                'DB ({db})'.format(task=self._task.task_id,
                                                   id=self._id,
                                                   etl=new_version,
                                                   db=existing_version))
        return False
class TableTarget(Target):
    """Target pairing an OBSTable metadata record with its physical data table.

    The physical table lives in the ``observatory`` schema under a hashed
    name derived from ``schema.name``; the metadata record tracks the
    column <-> table mapping (OBSColumnTable rows).
    """

    def __init__(self, schema, name, obs_table, columns, task):
        '''
        columns: should be an ordereddict if you want to specify columns' order
        in the table
        '''
        self._id = '.'.join([schema, name])
        obs_table.id = self._id
        # physical table name: prefix + sha1 of the slugified logical id,
        # keeping names short and identifier-safe regardless of the input
        obs_table.tablename = '{prefix}{name}'.format(prefix=OBSERVATORY_PREFIX, name=sha1(
            underscore_slugify(self._id).encode('utf-8')).hexdigest())
        self.table = '{schema}.{table}'.format(schema=OBSERVATORY_SCHEMA, table=obs_table.tablename)
        self.qualified_tablename = '"{schema}".{table}'.format(schema=OBSERVATORY_SCHEMA, table=obs_table.tablename)
        self.obs_table = obs_table
        self._tablename = obs_table.tablename
        self._schema = schema
        self._name = name
        # snapshot of the metadata attributes as passed in
        self._obs_dict = obs_table.__dict__.copy()
        self._columns = columns
        self._task = task
        # reuse sqlalchemy's in-memory Table object if already registered
        if obs_table.tablename in metadata.tables:
            self._table = metadata.tables[obs_table.tablename]
        else:
            self._table = None

    @property
    def tablename(self):
        return self._tablename

    @property
    def schema(self):
        # physical tables always live in the observatory schema
        return 'observatory'

    def sync(self):
        '''
        Whether this data should be synced to carto. Defaults to True.
        '''
        return True

    def exists(self):
        '''
        We always want to run this at least once, because we can always
        regenerate tabular data from scratch.

        True only when metadata version matches the ETL version AND the
        physical table exists AND holds at least one row. Raises if the DB
        metadata is NEWER than the code.
        '''
        session = current_session()
        existing = self.get(session)
        new_version = float(self.obs_table.version or 0.0)
        if existing:
            existing_version = float(existing.version or 0.0)
            if existing in session:
                # detach so later session operations don't flush this copy
                session.expunge(existing)
        else:
            existing_version = 0.0
        if existing and existing_version == new_version:
            # metadata is current -- now verify the physical table is there...
            resp = session.execute(
                'SELECT COUNT(*) FROM information_schema.tables '
                "WHERE table_schema = '{schema}' "
                "  AND table_name = '{tablename}' ".format(
                    schema='observatory',
                    tablename=existing.tablename))
            if int(resp.fetchone()[0]) == 0:
                return False
            # ...and actually contains data
            resp = session.execute(
                'SELECT row_number() over () '
                'FROM "{schema}".{tablename} LIMIT 1 '.format(
                    schema='observatory',
                    tablename=existing.tablename))
            return resp.fetchone() is not None
        elif existing and existing_version > new_version:
            raise Exception('Metadata version mismatch: cannot run task {task} '
                            '(id "{id}") '
                            'with ETL version ({etl}) older than what is in '
                            'DB ({db})'.format(task=self._task.task_id,
                                               id=self._id,
                                               etl=new_version,
                                               db=existing_version))
        return False

    def get(self, session):
        '''
        Return a copy of the underlying OBSTable in the specified session.
        '''
        with session.no_autoflush:
            return session.query(OBSTable).get(self._id)

    def update_or_create_table(self):
        """(Re)create the physical data table from the column targets."""
        session = current_session()
        # create new local data table
        columns = []
        for colname, coltarget in list(self._columns.items()):
            colname = colname.lower()
            col = coltarget.get(session)
            # Column info for sqlalchemy's internal metadata: map the OBS
            # column's declared type string to a sqlalchemy type
            if col.type.lower() == 'geometry':
                coltype = Geometry
            elif col.type.lower().startswith('geometry(point'):
                coltype = Point
            elif col.type.lower().startswith('geometry(linestring'):
                coltype = Linestring
            # For enum type, pull keys from extra["categories"]
            elif col.type.lower().startswith('enum'):
                cats = list(col.extra['categories'].keys())
                coltype = types.Enum(*cats, name=col.id + '_enum')
            else:
                # fall back to the sqlalchemy type of the same name
                coltype = getattr(types, col.type.capitalize())
            columns.append(Column(colname, coltype))
        obs_table = self.get(session) or self.obs_table
        # replace local data table: drop any stale sqlalchemy registration
        if obs_table.id in metadata.tables:
            metadata.tables[obs_table.id].drop()
        self._table = Table(obs_table.tablename, metadata, *columns,
                            extend_existing=True, schema='observatory')
        session.commit()
        # drop-then-create guarantees a clean slate for the ETL run
        self._table.drop(checkfirst=True)
        self._table.create()

    def update_or_create_metadata(self, _testmode=False):
        """Upsert the OBSColumnTable rows mapping columns onto this table.

        With _testmode=True the rows are built without querying existing
        mappings (no DB round-trips beyond the merge).
        """
        session = current_session()
        # replace metadata table
        self.obs_table = session.merge(self.obs_table)
        obs_table = self.obs_table
        for i, colname_coltarget in enumerate(self._columns.items()):
            colname, coltarget = colname_coltarget
            colname = colname.lower()
            col = coltarget.get(session)
            if _testmode:
                coltable = OBSColumnTable(colname=colname, table=obs_table,
                                        column=col)
            else:
                # Column info for obs metadata: reuse an existing mapping for
                # this (column, table) pair when present
                coltable = session.query(OBSColumnTable).filter_by(
                    column_id=col.id, table_id=obs_table.id).first()
                if coltable:
                    coltable.colname = colname
                else:
                    # catch the case where a column id has changed
                    coltable = session.query(OBSColumnTable).filter_by(
                        table_id=obs_table.id, colname=colname).first()
                    if coltable:
                        coltable.column = col
                    else:
                        coltable = OBSColumnTable(colname=colname, table=obs_table,
                                                column=col)
            session.add(coltable)
class RepoTarget(LocalTarget):
    """LocalTarget whose path is resolved from a versioned resource repository.

    The path is looked up in the tracking table ``"schema".tablename`` by
    ``resource_id``/``version``; when no usable registration exists, a
    deterministic path under ``repo_dir`` is built instead.
    """

    def __init__(self, schema, tablename, repo_dir, resource_id, version, filename):
        self.format = None
        self.is_tmp = False
        self.schema = schema
        self.tablename = tablename
        self.repo_dir = repo_dir
        self.resource_id = resource_id
        self.version = version
        self.filename = filename

    @property
    def path(self):
        # prefer the path registered in the DB, but only if the file is there
        path = self._get_path()
        if path and os.path.isfile(path):
            return path
        else:
            return self._build_path()

    def _build_path(self):
        # deterministic layout: <repo_dir>/<resource_id>/<version>/<filename>
        return os.path.join(self.repo_dir, self.resource_id, str(self.version), self.filename)

    def _get_path(self):
        """Return the path registered in the DB, or None if absent/unreachable."""
        path = None
        query = '''
            SELECT path FROM "{schema}".{table}
            WHERE id = '{resource_id}'
            AND version = {version}
            '''.format(schema=self.schema,
                       table=self.tablename,
                       resource_id=self.resource_id,
                       version=self.version)
        try:
            result = current_session().execute(query).fetchone()
            if result:
                path = result[0]
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt; narrow it and log for diagnosis.
            # Lookup failures are non-fatal: we fall back to _build_path().
            LOGGER.exception('repo path lookup failed for %s v%s',
                             self.resource_id, self.version)
            path = None
        return path

    def exists(self):
        # exists only when a registered path points at a real file
        path = self._get_path()
        return path and os.path.isfile(path)
class ConstraintExistsTarget(Target):
    """Target satisfied when a named constraint exists on the given table."""

    def __init__(self, schema, table, constraint):
        self.schema = schema
        self.tablename = table
        self.constraint = constraint
        self.session = current_session()

    @property
    def table(self):
        return '"{schema}".{tablename}'.format(schema=self.schema,
                                               tablename=self.tablename)

    def exists(self):
        # information_schema lists one row per column the constraint touches;
        # any row at all means the constraint is present
        template = ("SELECT 1 FROM information_schema.constraint_column_usage "
                    "WHERE table_schema = '{schema}' "
                    "  AND table_name ilike '{table}' "
                    "  AND constraint_name = '{constraint}'")
        query = template.format(schema=self.schema,
                                table=self.tablename,
                                constraint=self.constraint)
        rows = self.session.execute(query).fetchall()
        return len(rows) > 0
class PostgresFunctionTarget(Target):
    """Target satisfied when a stored function exists in the given schema."""

    def __init__(self, schema, function_name):
        self._schema = schema
        self._function_name = function_name
        self._session = current_session()

    @property
    def schema(self):
        return self._schema

    @property
    def function_name(self):
        return self._function_name

    @property
    def function(self):
        return '"{schema}".{function_name}'.format(schema=self._schema,
                                                   function_name=self._function_name)

    def exists(self):
        # information_schema.routines lists stored procedures and functions
        query = '''
                SELECT 1 FROM information_schema.routines
                WHERE routine_schema = '{schema}'
                AND routine_name = '{function_name}'
                '''.format(schema=self._schema,
                           function_name=self._function_name)
        rows = self._session.execute(query).fetchall()
        return len(rows) > 0
class URLTarget(Target):
    '''
    Target for a resource addressed by URL. Accepts both local paths and
    urls: a bare local path (no scheme) is treated as a file:// URL.
    '''

    def __init__(self, url):
        self.path = url
        # urlparse().scheme is '' for plain local paths
        scheme = urlparse(url).scheme
        if scheme == '':
            self.url = 'file://{}'.format(url)
        else:
            self.url = url

    def exists(self):
        """True when the URL can be opened (HTTP reachable / file present)."""
        try:
            # BUG FIX: use the response as a context manager so the
            # underlying file/socket is closed — the original leaked the
            # object returned by urlopen() on every call.
            with urlopen(self.url):
                return True
        except URLError:
            return False
| [
"tasks.util.underscore_slugify",
"urllib.parse.urlparse",
"sqlalchemy.Table",
"lib.logger.get_logger",
"os.path.isfile",
"sqlalchemy.types.Enum",
"sqlalchemy.Column",
"tasks.meta.OBSColumnTable",
"tasks.meta.current_session",
"urllib.request.urlopen"
] | [((532, 552), 'lib.logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (542, 552), False, 'from lib.logger import get_logger\n'), ((1518, 1535), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (1533, 1535), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((6642, 6659), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (6657, 6659), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((9088, 9105), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (9103, 9105), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((10959, 10976), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (10974, 10976), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((12113, 12208), 'sqlalchemy.Table', 'Table', (['obs_table.tablename', 'metadata', '*columns'], {'extend_existing': '(True)', 'schema': '"""observatory"""'}), "(obs_table.tablename, metadata, *columns, extend_existing=True, schema\n ='observatory')\n", (12118, 12208), False, 'from sqlalchemy import Table, types, Column\n'), ((12405, 12422), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (12420, 12422), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((15294, 15311), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (15309, 15311), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((16144, 16161), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (16159, 16161), False, 'from 
tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((5067, 5084), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (5082, 5084), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((14162, 14182), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (14176, 14182), False, 'import os\n'), ((15062, 15082), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (15076, 15082), False, 'import os\n'), ((17047, 17060), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (17055, 17060), False, 'from urllib.parse import urlparse\n'), ((17229, 17246), 'urllib.request.urlopen', 'urlopen', (['self.url'], {}), '(self.url)\n', (17236, 17246), False, 'from urllib.request import urlopen\n'), ((4978, 4995), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (4993, 4995), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((6510, 6527), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (6525, 6527), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((11880, 11904), 'sqlalchemy.Column', 'Column', (['colname', 'coltype'], {}), '(colname, coltype)\n', (11886, 11904), False, 'from sqlalchemy import Table, types, Column\n'), ((12802, 12862), 'tasks.meta.OBSColumnTable', 'OBSColumnTable', ([], {'colname': 'colname', 'table': 'obs_table', 'column': 'col'}), '(colname=colname, table=obs_table, column=col)\n', (12816, 12862), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((5238, 5255), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (5253, 5255), False, 'from tasks.meta import 
OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((6566, 6583), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (6581, 6583), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((13556, 13616), 'tasks.meta.OBSColumnTable', 'OBSColumnTable', ([], {'colname': 'colname', 'table': 'obs_table', 'column': 'col'}), '(colname=colname, table=obs_table, column=col)\n', (13570, 13616), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((11730, 11770), 'sqlalchemy.types.Enum', 'types.Enum', (['*cats'], {'name': "(col.id + '_enum')"}), "(*cats, name=col.id + '_enum')\n", (11740, 11770), False, 'from sqlalchemy import Table, types, Column\n'), ((14822, 14839), 'tasks.meta.current_session', 'current_session', ([], {}), '()\n', (14837, 14839), False, 'from tasks.meta import OBSColumn, OBSTable, metadata, Geometry, Point, Linestring, OBSColumnTable, OBSTag, current_session\n'), ((7950, 7978), 'tasks.util.underscore_slugify', 'underscore_slugify', (['self._id'], {}), '(self._id)\n', (7968, 7978), False, 'from tasks.util import query_cartodb, underscore_slugify, OBSERVATORY_PREFIX, OBSERVATORY_SCHEMA\n')] |
import pytest
import docker
import requests
name = "ctomcatapp"
@pytest.fixture
def error_fixture():
assert 0
def test_container():
client = docker.from_env()
container = client.containers.get(name)
assert container.status == "running"
def test_app():
response = requests.get('http://localhost:8080/sample')
assert response.status_code == 200
# ----------------------------------------------------------
# import requests
# import logging
#
# baseUrl = "http://localhost:8080/"
# LOGGER = logging.getLogger(__name__)
#
# def test_sample_app() :
# path = "sample"
# # LOGGER.info('Site response')
# response = requests.get(url=baseUrl+path,timeout = 0.5)
# assert response.status_code == 200
| [
"docker.from_env",
"requests.get"
] | [((153, 170), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (168, 170), False, 'import docker\n'), ((288, 332), 'requests.get', 'requests.get', (['"""http://localhost:8080/sample"""'], {}), "('http://localhost:8080/sample')\n", (300, 332), False, 'import requests\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
calc beat score of files
copyright: www.mgtv.com
"""
import os
import sys
import argparse
import numpy as np
import traceback
import beat_evaluation_toolbox as be
def calc_beat_score_of_file(annotation_file, beat_file):
#check input params
if os.path.exists(annotation_file) == False:
print("failed! annotation_file:%s not exist\n" % (annotation_file))
return False, 0.0
if os.path.exists(beat_file) == False:
print("failed! beat_file:%s not exist\n" % (beat_file))
return False, 0.0
data_annotation = np.loadtxt(annotation_file, usecols=(0))
data_annotation = np.expand_dims(data_annotation, axis=0)
data_beat = np.loadtxt(beat_file, usecols=(0))
data_beat = np.expand_dims(data_beat, axis=0)
R = be.evaluate_db(data_annotation, data_beat, 'all', doCI=False)
#输出结果
print(R['scores'])
pscore = R['scores']['pScore'][0]
f_measure = R['scores']['fMeasure'][0]
aml_c = R['scores']['amlC'][0]
aml_t = R['scores']['amlT'][0]
cml_c = R['scores']['cmlC'][0]
cml_t = R['scores']['cmlT'][0]
cem_acc = R['scores']['cemgilAcc'][0]
total_score = (aml_c + cem_acc + cml_c + f_measure + pscore + cml_t + aml_t) / 7
print("[%s] score:%.4f"%(beat_file, total_score))
return True, total_score
def calc_avg_score_of_files(annotation_files_dir, beat_files_dir, file_extension):
#check input params
if os.path.exists(annotation_files_dir) == False:
print("failed! annotation_files_dir:%s not exist\n" % (annotation_files_dir))
return False, 0.0
if os.path.exists(beat_files_dir) == False:
print("failed! beat_files_dir:%s not exist\n" % (beat_files_dir))
return False, 0.0
if not annotation_files_dir.endswith("/"):
annotation_files_dir += "/"
if not beat_files_dir.endswith("/"):
beat_files_dir += "/"
annotation_files_url = [f for f in os.listdir(annotation_files_dir) if f.endswith((file_extension))]
nb_annotation_files = len(annotation_files_url)
beat_files_url = [f for f in os.listdir(beat_files_dir) if f.endswith((file_extension))]
nb_beat_files = len(beat_files_url)
if nb_annotation_files != nb_beat_files or nb_annotation_files == 0:
print("failed! annotation files num:%d beat files num:%d\n" % (nb_annotation_files, nb_beat_files))
return False, 0.0
sum_score = 0.0
for i in range(nb_annotation_files):
annotation_file = annotation_files_dir + annotation_files_url[i]
beat_file = beat_files_dir + annotation_files_url[i]
if os.path.exists(beat_file) == False:
print("failed! beat file:%s not exist\n" % (beat_file))
return False, 0.0
ret, score = calc_beat_score_of_file(annotation_file, beat_file)
if ret == False:
print("failed! calc_beat_score_of_file failed for file:%s\n" % (beat_file))
return False, 0.0
sum_score = sum_score + score
avg_score = sum_score / nb_annotation_files
return True, avg_score
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="calc avg score of beat(downbeat) files")
parser.add_argument("--annotation_files_dir", required=True, help="Path to input annotation files dir", default="")
parser.add_argument("--beat_files_dir", required=True, help="Path to input beats files dir", default="")
parser.add_argument("--file_extension", required=True, help="File ext, beat or downbeat", default="")
# 获得工作目录,程序模块名称,并切换工作目录
s_work_path, s_module_name = os.path.split(os.path.abspath(sys.argv[0]))
print(s_work_path, s_module_name)
os.chdir(s_work_path)
try:
args = parser.parse_args()
ret, score = calc_avg_score_of_files(args.annotation_files_dir, args.beat_files_dir, args.file_extension)
print("Final score:%.4f" % score)
except Exception as e:
traceback.print_exc()
print("Exception running beat_score_calc: [%s]" % (str(e)))
ret = False
if ret == True:
sys.exit(0)
else:
sys.exit(1)
| [
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"beat_evaluation_toolbox.evaluate_db",
"os.chdir",
"numpy.expand_dims",
"sys.exit",
"os.path.abspath",
"numpy.loadtxt",
"traceback.print_exc"
] | [((640, 678), 'numpy.loadtxt', 'np.loadtxt', (['annotation_file'], {'usecols': '(0)'}), '(annotation_file, usecols=0)\n', (650, 678), True, 'import numpy as np\n'), ((703, 742), 'numpy.expand_dims', 'np.expand_dims', (['data_annotation'], {'axis': '(0)'}), '(data_annotation, axis=0)\n', (717, 742), True, 'import numpy as np\n'), ((769, 801), 'numpy.loadtxt', 'np.loadtxt', (['beat_file'], {'usecols': '(0)'}), '(beat_file, usecols=0)\n', (779, 801), True, 'import numpy as np\n'), ((820, 853), 'numpy.expand_dims', 'np.expand_dims', (['data_beat'], {'axis': '(0)'}), '(data_beat, axis=0)\n', (834, 853), True, 'import numpy as np\n'), ((867, 928), 'beat_evaluation_toolbox.evaluate_db', 'be.evaluate_db', (['data_annotation', 'data_beat', '"""all"""'], {'doCI': '(False)'}), "(data_annotation, data_beat, 'all', doCI=False)\n", (881, 928), True, 'import beat_evaluation_toolbox as be\n'), ((3285, 3362), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""calc avg score of beat(downbeat) files"""'}), "(description='calc avg score of beat(downbeat) files')\n", (3308, 3362), False, 'import argparse\n'), ((3850, 3871), 'os.chdir', 'os.chdir', (['s_work_path'], {}), '(s_work_path)\n', (3858, 3871), False, 'import os\n'), ((314, 345), 'os.path.exists', 'os.path.exists', (['annotation_file'], {}), '(annotation_file)\n', (328, 345), False, 'import os\n'), ((474, 499), 'os.path.exists', 'os.path.exists', (['beat_file'], {}), '(beat_file)\n', (488, 499), False, 'import os\n'), ((1539, 1575), 'os.path.exists', 'os.path.exists', (['annotation_files_dir'], {}), '(annotation_files_dir)\n', (1553, 1575), False, 'import os\n'), ((1714, 1744), 'os.path.exists', 'os.path.exists', (['beat_files_dir'], {}), '(beat_files_dir)\n', (1728, 1744), False, 'import os\n'), ((3778, 3806), 'os.path.abspath', 'os.path.abspath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (3793, 3806), False, 'import os\n'), ((4280, 4291), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4288, 
4291), False, 'import sys\n'), ((4310, 4321), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4318, 4321), False, 'import sys\n'), ((2059, 2091), 'os.listdir', 'os.listdir', (['annotation_files_dir'], {}), '(annotation_files_dir)\n', (2069, 2091), False, 'import os\n'), ((2211, 2237), 'os.listdir', 'os.listdir', (['beat_files_dir'], {}), '(beat_files_dir)\n', (2221, 2237), False, 'import os\n'), ((2744, 2769), 'os.path.exists', 'os.path.exists', (['beat_file'], {}), '(beat_file)\n', (2758, 2769), False, 'import os\n'), ((4133, 4154), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4152, 4154), False, 'import traceback\n')] |
from flask_wtf import FlaskForm
from wtforms import HiddenField, FloatField, SelectField, StringField, SubmitField, ValidationError
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import Length, Optional, Required
from .. models import EventFrameAttributeTemplate, Lookup, LookupValue, UnitOfMeasurement
class EventFrameAttributeTemplateForm(FlaskForm):
name = StringField("Name", validators = [Required(), Length(1, 45)])
description = StringField("Description", validators = [Length(0, 255)])
lookup = QuerySelectField("Lookup", validators = [Required()], get_label = "Name")
defaultStartLookupValue = SelectField("Default Start Value", validators = [Optional()], coerce = float)
defaultEndLookupValue = SelectField("Default End Value", validators = [Optional()], coerce = float)
unitOfMeasurement = QuerySelectField("Unit", query_factory = lambda: UnitOfMeasurement.query. \
order_by(UnitOfMeasurement.Abbreviation), get_label = "Abbreviation")
defaultStartValue = FloatField("Default Start Value", validators = [Optional()])
defaultEndValue = FloatField("Default End Value", validators = [Optional()])
eventFrameAttributeTemplateId = HiddenField()
eventFrameTemplateId = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
eventFrameAttributeTemplate = EventFrameAttributeTemplate.query.filter_by(EventFrameTemplateId = self.eventFrameTemplateId.data,
Name = field.data).first()
if eventFrameAttributeTemplate:
if self.eventFrameAttributeTemplateId.data == "":
# Trying to add a new eventFrameAttributeTemplate using a name that already exists.
validationError = True
else:
if int(self.eventFrameAttributeTemplateId.data) != eventFrameAttributeTemplate.EventFrameAttributeTemplateId:
# Trying to change the name of an eventFrameAttributeTemplate to a name that already exists.
validationError = True
if validationError:
raise ValidationError('The name "{}" already exists.'.format(field.data))
| [
"wtforms.SubmitField",
"wtforms.HiddenField",
"wtforms.validators.Required",
"wtforms.validators.Optional",
"wtforms.validators.Length"
] | [((1185, 1198), 'wtforms.HiddenField', 'HiddenField', ([], {}), '()\n', (1196, 1198), False, 'from wtforms import HiddenField, FloatField, SelectField, StringField, SubmitField, ValidationError\n'), ((1223, 1236), 'wtforms.HiddenField', 'HiddenField', ([], {}), '()\n', (1234, 1236), False, 'from wtforms import HiddenField, FloatField, SelectField, StringField, SubmitField, ValidationError\n'), ((1256, 1269), 'wtforms.HiddenField', 'HiddenField', ([], {}), '()\n', (1267, 1269), False, 'from wtforms import HiddenField, FloatField, SelectField, StringField, SubmitField, ValidationError\n'), ((1280, 1299), 'wtforms.SubmitField', 'SubmitField', (['"""Save"""'], {}), "('Save')\n", (1291, 1299), False, 'from wtforms import HiddenField, FloatField, SelectField, StringField, SubmitField, ValidationError\n'), ((432, 442), 'wtforms.validators.Required', 'Required', ([], {}), '()\n', (440, 442), False, 'from wtforms.validators import Length, Optional, Required\n'), ((444, 457), 'wtforms.validators.Length', 'Length', (['(1)', '(45)'], {}), '(1, 45)\n', (450, 457), False, 'from wtforms.validators import Length, Optional, Required\n'), ((516, 530), 'wtforms.validators.Length', 'Length', (['(0)', '(255)'], {}), '(0, 255)\n', (522, 530), False, 'from wtforms.validators import Length, Optional, Required\n'), ((584, 594), 'wtforms.validators.Required', 'Required', ([], {}), '()\n', (592, 594), False, 'from wtforms.validators import Length, Optional, Required\n'), ((693, 703), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (701, 703), False, 'from wtforms.validators import Length, Optional, Required\n'), ((794, 804), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (802, 804), False, 'from wtforms.validators import Length, Optional, Required\n'), ((1061, 1071), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (1069, 1071), False, 'from wtforms.validators import Length, Optional, Required\n'), ((1139, 1149), 'wtforms.validators.Optional', 
'Optional', ([], {}), '()\n', (1147, 1149), False, 'from wtforms.validators import Length, Optional, Required\n')] |
from django.core.cache import cache
from asgiref.sync import async_to_sync
from celery import shared_task
from channels.layers import get_channel_layer
from cryptocompy import price
@shared_task
def update_cc_prices():
cryptocoins = ['ETH', 'BTC']
currencies = ['EUR', 'USD']
response = price.get_current_price(cryptocoins, currencies)
channel_layer = get_channel_layer()
for cryptocoin in cryptocoins:
for currency in currencies:
latest_price = response[cryptocoin][currency]
ticker_code = cryptocoin + currency
if cache.get(ticker_code) != latest_price:
cache.set(ticker_code, response[cryptocoin][currency])
async_to_sync(channel_layer.group_send)(
ticker_code,
{
'type': 'price_update',
'price': latest_price,
}
)
| [
"cryptocompy.price.get_current_price",
"channels.layers.get_channel_layer",
"django.core.cache.cache.set",
"asgiref.sync.async_to_sync",
"django.core.cache.cache.get"
] | [((302, 350), 'cryptocompy.price.get_current_price', 'price.get_current_price', (['cryptocoins', 'currencies'], {}), '(cryptocoins, currencies)\n', (325, 350), False, 'from cryptocompy import price\n'), ((371, 390), 'channels.layers.get_channel_layer', 'get_channel_layer', ([], {}), '()\n', (388, 390), False, 'from channels.layers import get_channel_layer\n'), ((583, 605), 'django.core.cache.cache.get', 'cache.get', (['ticker_code'], {}), '(ticker_code)\n', (592, 605), False, 'from django.core.cache import cache\n'), ((639, 693), 'django.core.cache.cache.set', 'cache.set', (['ticker_code', 'response[cryptocoin][currency]'], {}), '(ticker_code, response[cryptocoin][currency])\n', (648, 693), False, 'from django.core.cache import cache\n'), ((710, 749), 'asgiref.sync.async_to_sync', 'async_to_sync', (['channel_layer.group_send'], {}), '(channel_layer.group_send)\n', (723, 749), False, 'from asgiref.sync import async_to_sync\n')] |
# Created: 17.02.2019
# Copyright (c) 2019, <NAME>
# License: MIT License
from typing import TYPE_CHECKING
import logging
from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass
from ezdxf.lldxf.const import DXF12, SUBCLASS_MARKER
from ezdxf.entities.dxfentity import base_class, SubclassProcessor, DXFEntity
from ezdxf.entities.layer import acdb_symbol_table_record
from .factory import register_entity
logger = logging.getLogger('ezdxf')
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, DXFNamespace
__all__ = ['Textstyle']
acdb_style = DefSubclass('AcDbTextStyleTableRecord', {
'name': DXFAttr(2, default='Standard'),
'flags': DXFAttr(70, default=0),
'height': DXFAttr(40, default=0), # fixed height, 0 if not fixed
'width': DXFAttr(41, default=1), # width factor
'oblique': DXFAttr(50, default=0), # oblique angle in degree, 0 = vertical
'generation_flags': DXFAttr(71, default=0), # 2 = backward, 4 = mirrored in Y
'last_height': DXFAttr(42, default=2.5), # last height used
'font': DXFAttr(3, default='txt'), # primary font file name
'bigfont': DXFAttr(4, default=''), # big font name, blank if none
})
@register_entity
class Textstyle(DXFEntity):
""" DXF STYLE entity """
DXFTYPE = 'STYLE'
DXFATTRIBS = DXFAttributes(base_class, acdb_symbol_table_record, acdb_style)
def load_dxf_attribs(self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
tags = processor.load_dxfattribs_into_namespace(dxf, acdb_style)
if len(tags) and not processor.r12:
processor.log_unprocessed_tags(tags, subclass=acdb_style.name)
return dxf
def export_entity(self, tagwriter: 'TagWriter') -> None:
super().export_entity(tagwriter)
# AcDbEntity export is done by parent class
if tagwriter.dxfversion > DXF12:
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_symbol_table_record.name)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_style.name)
# for all DXF versions
self.dxf.export_dxf_attribs(tagwriter, [
'name', 'flags', 'height', 'width', 'oblique', 'generation_flags', 'last_height', 'font', 'bigfont'
])
| [
"logging.getLogger",
"ezdxf.lldxf.attributes.DXFAttr",
"ezdxf.lldxf.attributes.DXFAttributes"
] | [((429, 455), 'logging.getLogger', 'logging.getLogger', (['"""ezdxf"""'], {}), "('ezdxf')\n", (446, 455), False, 'import logging\n'), ((1296, 1359), 'ezdxf.lldxf.attributes.DXFAttributes', 'DXFAttributes', (['base_class', 'acdb_symbol_table_record', 'acdb_style'], {}), '(base_class, acdb_symbol_table_record, acdb_style)\n', (1309, 1359), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\n'), ((622, 652), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(2)'], {'default': '"""Standard"""'}), "(2, default='Standard')\n", (629, 652), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\n'), ((667, 689), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(70)'], {'default': '(0)'}), '(70, default=0)\n', (674, 689), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\n'), ((705, 727), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(40)'], {'default': '(0)'}), '(40, default=0)\n', (712, 727), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\n'), ((774, 796), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(41)'], {'default': '(1)'}), '(41, default=1)\n', (781, 796), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\n'), ((829, 851), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(50)'], {'default': '(0)'}), '(50, default=0)\n', (836, 851), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\n'), ((918, 940), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(71)'], {'default': '(0)'}), '(71, default=0)\n', (925, 940), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\n'), ((996, 1020), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(42)'], {'default': '(2.5)'}), '(42, default=2.5)\n', (1003, 1020), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\n'), ((1054, 1079), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(3)'], {'default': '"""txt"""'}), 
"(3, default='txt')\n", (1061, 1079), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\n'), ((1122, 1144), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(4)'], {'default': '""""""'}), "(4, default='')\n", (1129, 1144), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\n')] |
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import TType, TMessageType, TPriority, TRequestContext, TProcessorEventHandler, TServerInterface, TProcessor, TException, TApplicationException, UnimplementedTypedef
from thrift.protocol.TProtocol import TProtocolException
from json import loads
import sys
if sys.version_info[0] >= 3:
long = int
import thrift.annotation.thrift.ttypes
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
fastproto = None
try:
from thrift.protocol import fastproto
except ImportError:
pass
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
__all__ = ['UTF8STRINGS', 'Foo', 'Foo2']
class Foo:
"""
Attributes:
- field1
- field2
- field3
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 3:
if ftype == TType.I32:
self.field1 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.I32:
self.field2 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.field3 = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('Foo')
if self.field2 != None:
oprot.writeFieldBegin('field2', TType.I32, 1)
oprot.writeI32(self.field2)
oprot.writeFieldEnd()
if self.field3 != None:
oprot.writeFieldBegin('field3', TType.I32, 2)
oprot.writeI32(self.field3)
oprot.writeFieldEnd()
if self.field1 != None:
oprot.writeFieldBegin('field1', TType.I32, 3)
oprot.writeI32(self.field1)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'field1' in json_obj and json_obj['field1'] is not None:
self.field1 = json_obj['field1']
if self.field1 > 0x7fffffff or self.field1 < -0x80000000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
if 'field2' in json_obj and json_obj['field2'] is not None:
self.field2 = json_obj['field2']
if self.field2 > 0x7fffffff or self.field2 < -0x80000000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
if 'field3' in json_obj and json_obj['field3'] is not None:
self.field3 = json_obj['field3']
if self.field3 > 0x7fffffff or self.field3 < -0x80000000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
def __repr__(self):
L = []
padding = ' ' * 4
if self.field1 is not None:
value = pprint.pformat(self.field1, indent=0)
value = padding.join(value.splitlines(True))
L.append(' field1=%s' % (value))
if self.field2 is not None:
value = pprint.pformat(self.field2, indent=0)
value = padding.join(value.splitlines(True))
L.append(' field2=%s' % (value))
if self.field3 is not None:
value = pprint.pformat(self.field3, indent=0)
value = padding.join(value.splitlines(True))
L.append(' field3=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
class Foo2:
"""
Attributes:
- field1
- field2
- field3
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 3:
if ftype == TType.I32:
self.field1 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.I32:
self.field2 = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.field3 = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('Foo2')
if self.field2 != None:
oprot.writeFieldBegin('field2', TType.I32, 1)
oprot.writeI32(self.field2)
oprot.writeFieldEnd()
if self.field3 != None:
oprot.writeFieldBegin('field3', TType.I32, 2)
oprot.writeI32(self.field3)
oprot.writeFieldEnd()
if self.field1 != None:
oprot.writeFieldBegin('field1', TType.I32, 3)
oprot.writeI32(self.field1)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'field1' in json_obj and json_obj['field1'] is not None:
self.field1 = json_obj['field1']
if self.field1 > 0x7fffffff or self.field1 < -0x80000000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
if 'field2' in json_obj and json_obj['field2'] is not None:
self.field2 = json_obj['field2']
if self.field2 > 0x7fffffff or self.field2 < -0x80000000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
if 'field3' in json_obj and json_obj['field3'] is not None:
self.field3 = json_obj['field3']
if self.field3 > 0x7fffffff or self.field3 < -0x80000000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
def __repr__(self):
L = []
padding = ' ' * 4
if self.field1 is not None:
value = pprint.pformat(self.field1, indent=0)
value = padding.join(value.splitlines(True))
L.append(' field1=%s' % (value))
if self.field2 is not None:
value = pprint.pformat(self.field2, indent=0)
value = padding.join(value.splitlines(True))
L.append(' field2=%s' % (value))
if self.field3 is not None:
value = pprint.pformat(self.field3, indent=0)
value = padding.join(value.splitlines(True))
L.append(' field3=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
all_structs.append(Foo)
Foo.thrift_spec = (
None, # 0
(1, TType.I32, 'field2', None, None, 2, ), # 1
(2, TType.I32, 'field3', None, None, 2, ), # 2
(3, TType.I32, 'field1', None, None, 2, ), # 3
)
Foo.thrift_struct_annotations = {
}
Foo.thrift_field_annotations = {
}
def Foo__init__(self, field1=None, field2=None, field3=None,):
self.field1 = field1
self.field2 = field2
self.field3 = field3
Foo.__init__ = Foo__init__
def Foo__setstate__(self, state):
state.setdefault('field1', None)
state.setdefault('field2', None)
state.setdefault('field3', None)
self.__dict__ = state
Foo.__getstate__ = lambda self: self.__dict__.copy()
Foo.__setstate__ = Foo__setstate__
all_structs.append(Foo2)
Foo2.thrift_spec = (
None, # 0
(1, TType.I32, 'field2', None, None, 2, ), # 1
(2, TType.I32, 'field3', None, None, 2, ), # 2
(3, TType.I32, 'field1', None, None, 2, ), # 3
)
Foo2.thrift_struct_annotations = {
}
Foo2.thrift_field_annotations = {
}
def Foo2__init__(self, field1=None, field2=None, field3=None,):
self.field1 = field1
self.field2 = field2
self.field3 = field3
Foo2.__init__ = Foo2__init__
def Foo2__setstate__(self, state):
state.setdefault('field1', None)
state.setdefault('field2', None)
state.setdefault('field3', None)
self.__dict__ = state
Foo2.__getstate__ = lambda self: self.__dict__.copy()
Foo2.__setstate__ = Foo2__setstate__
fix_spec(all_structs)
del all_structs
| [
"thrift.protocol.fastproto.encode",
"json.loads",
"thrift.protocol.fastproto.decode",
"pprint.pformat",
"thrift.util.Recursive.fix_spec",
"thrift.protocol.TProtocol.TProtocolException"
] | [((13246, 13267), 'thrift.util.Recursive.fix_spec', 'fix_spec', (['all_structs'], {}), '(all_structs)\n', (13254, 13267), False, 'from thrift.util.Recursive import fix_spec\n'), ((1600, 1719), 'thrift.protocol.fastproto.decode', 'fastproto.decode', (['self', 'iprot.trans', '[self.__class__, self.thrift_spec, False]'], {'utf8strings': 'UTF8STRINGS', 'protoid': '(0)'}), '(self, iprot.trans, [self.__class__, self.thrift_spec, \n False], utf8strings=UTF8STRINGS, protoid=0)\n', (1616, 1719), False, 'from thrift.protocol import fastproto\n'), ((2074, 2193), 'thrift.protocol.fastproto.decode', 'fastproto.decode', (['self', 'iprot.trans', '[self.__class__, self.thrift_spec, False]'], {'utf8strings': 'UTF8STRINGS', 'protoid': '(2)'}), '(self, iprot.trans, [self.__class__, self.thrift_spec, \n False], utf8strings=UTF8STRINGS, protoid=2)\n', (2090, 2193), False, 'from thrift.protocol import fastproto\n'), ((4657, 4668), 'json.loads', 'loads', (['json'], {}), '(json)\n', (4662, 4668), False, 'from json import loads\n'), ((5569, 5606), 'pprint.pformat', 'pprint.pformat', (['self.field1'], {'indent': '(0)'}), '(self.field1, indent=0)\n', (5583, 5606), False, 'import pprint\n'), ((5746, 5783), 'pprint.pformat', 'pprint.pformat', (['self.field2'], {'indent': '(0)'}), '(self.field2, indent=0)\n', (5760, 5783), False, 'import pprint\n'), ((5923, 5960), 'pprint.pformat', 'pprint.pformat', (['self.field3'], {'indent': '(0)'}), '(self.field3, indent=0)\n', (5937, 5960), False, 'import pprint\n'), ((7026, 7145), 'thrift.protocol.fastproto.decode', 'fastproto.decode', (['self', 'iprot.trans', '[self.__class__, self.thrift_spec, False]'], {'utf8strings': 'UTF8STRINGS', 'protoid': '(0)'}), '(self, iprot.trans, [self.__class__, self.thrift_spec, \n False], utf8strings=UTF8STRINGS, protoid=0)\n', (7042, 7145), False, 'from thrift.protocol import fastproto\n'), ((7500, 7619), 'thrift.protocol.fastproto.decode', 'fastproto.decode', (['self', 'iprot.trans', '[self.__class__, 
self.thrift_spec, False]'], {'utf8strings': 'UTF8STRINGS', 'protoid': '(2)'}), '(self, iprot.trans, [self.__class__, self.thrift_spec, \n False], utf8strings=UTF8STRINGS, protoid=2)\n', (7516, 7619), False, 'from thrift.protocol import fastproto\n'), ((10084, 10095), 'json.loads', 'loads', (['json'], {}), '(json)\n', (10089, 10095), False, 'from json import loads\n'), ((10996, 11033), 'pprint.pformat', 'pprint.pformat', (['self.field1'], {'indent': '(0)'}), '(self.field1, indent=0)\n', (11010, 11033), False, 'import pprint\n'), ((11173, 11210), 'pprint.pformat', 'pprint.pformat', (['self.field2'], {'indent': '(0)'}), '(self.field2, indent=0)\n', (11187, 11210), False, 'import pprint\n'), ((11350, 11387), 'pprint.pformat', 'pprint.pformat', (['self.field3'], {'indent': '(0)'}), '(self.field3, indent=0)\n', (11364, 11387), False, 'import pprint\n'), ((3161, 3266), 'thrift.protocol.fastproto.encode', 'fastproto.encode', (['self', '[self.__class__, self.thrift_spec, False]'], {'utf8strings': 'UTF8STRINGS', 'protoid': '(0)'}), '(self, [self.__class__, self.thrift_spec, False],\n utf8strings=UTF8STRINGS, protoid=0)\n', (3177, 3266), False, 'from thrift.protocol import fastproto\n'), ((3582, 3687), 'thrift.protocol.fastproto.encode', 'fastproto.encode', (['self', '[self.__class__, self.thrift_spec, False]'], {'utf8strings': 'UTF8STRINGS', 'protoid': '(2)'}), '(self, [self.__class__, self.thrift_spec, False],\n utf8strings=UTF8STRINGS, protoid=2)\n', (3598, 3687), False, 'from thrift.protocol import fastproto\n'), ((4850, 4938), 'thrift.protocol.TProtocol.TProtocolException', 'TProtocolException', (['TProtocolException.INVALID_DATA', '"""number exceeds limit in field"""'], {}), "(TProtocolException.INVALID_DATA,\n 'number exceeds limit in field')\n", (4868, 4938), False, 'from thrift.protocol.TProtocol import TProtocolException\n'), ((5116, 5204), 'thrift.protocol.TProtocol.TProtocolException', 'TProtocolException', (['TProtocolException.INVALID_DATA', '"""number exceeds 
limit in field"""'], {}), "(TProtocolException.INVALID_DATA,\n 'number exceeds limit in field')\n", (5134, 5204), False, 'from thrift.protocol.TProtocol import TProtocolException\n'), ((5382, 5470), 'thrift.protocol.TProtocol.TProtocolException', 'TProtocolException', (['TProtocolException.INVALID_DATA', '"""number exceeds limit in field"""'], {}), "(TProtocolException.INVALID_DATA,\n 'number exceeds limit in field')\n", (5400, 5470), False, 'from thrift.protocol.TProtocol import TProtocolException\n'), ((8587, 8692), 'thrift.protocol.fastproto.encode', 'fastproto.encode', (['self', '[self.__class__, self.thrift_spec, False]'], {'utf8strings': 'UTF8STRINGS', 'protoid': '(0)'}), '(self, [self.__class__, self.thrift_spec, False],\n utf8strings=UTF8STRINGS, protoid=0)\n', (8603, 8692), False, 'from thrift.protocol import fastproto\n'), ((9008, 9113), 'thrift.protocol.fastproto.encode', 'fastproto.encode', (['self', '[self.__class__, self.thrift_spec, False]'], {'utf8strings': 'UTF8STRINGS', 'protoid': '(2)'}), '(self, [self.__class__, self.thrift_spec, False],\n utf8strings=UTF8STRINGS, protoid=2)\n', (9024, 9113), False, 'from thrift.protocol import fastproto\n'), ((10277, 10365), 'thrift.protocol.TProtocol.TProtocolException', 'TProtocolException', (['TProtocolException.INVALID_DATA', '"""number exceeds limit in field"""'], {}), "(TProtocolException.INVALID_DATA,\n 'number exceeds limit in field')\n", (10295, 10365), False, 'from thrift.protocol.TProtocol import TProtocolException\n'), ((10543, 10631), 'thrift.protocol.TProtocol.TProtocolException', 'TProtocolException', (['TProtocolException.INVALID_DATA', '"""number exceeds limit in field"""'], {}), "(TProtocolException.INVALID_DATA,\n 'number exceeds limit in field')\n", (10561, 10631), False, 'from thrift.protocol.TProtocol import TProtocolException\n'), ((10809, 10897), 'thrift.protocol.TProtocol.TProtocolException', 'TProtocolException', (['TProtocolException.INVALID_DATA', '"""number exceeds limit in 
field"""'], {}), "(TProtocolException.INVALID_DATA,\n 'number exceeds limit in field')\n", (10827, 10897), False, 'from thrift.protocol.TProtocol import TProtocolException\n')] |
import subprocess
from ripple.runners.base_runner import BaseRunner
from ripple import logger
import os
import time
import fcntl
class SlurmRunner(BaseRunner):
    """
    A job runner for slurm jobs.

    Jobs are dispatched by shelling out to ``sbatch`` rather than via
    pyslurm, which is aimed at administrators rather than end users.
    """
    def non_block_read(self, output):
        """Read whatever is currently available from a pipe without blocking.

        The underlying file descriptor is switched to non-blocking mode so
        that ``read()`` returns immediately even when the child process has
        not produced output yet.

        output -- file-like object backed by a real file descriptor
                  (e.g. ``Popen.stdout``).
        Returns the available data as a string, or "" if nothing was read.
        """
        fd = output.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        try:
            data = output.read()
            # A non-blocking read can return None when no data is ready;
            # the previous str(output.read()) turned that into the literal
            # string "None".
            return str(data) if data is not None else ""
        except Exception:
            # Narrowed from a bare except, which would also swallow
            # KeyboardInterrupt/SystemExit.
            return ""
    def submit_job(self, job):
        """
        I wanted to use pyslurm for this, but I believe it is designed
        for admins, rather than end users. I didn't see a nice way to
        dispatch jobs. Instead, this will just start a subprocess to
        start the job, but then use pyslurm to check the jobs status etc.
        Start a subprocess to execute the command

        job -- dict describing the job; must provide 'target_path' and
               'target_name' after self.set_targets() has filled them in.
               'output' and 'err' are populated with sbatch's stdout/stderr.
        """
        job = self.set_targets(job)
        job['output'] = ''
        job['err'] = ''
        logger.info("In slurm submit command")
        cmd = "cd %s; sbatch %s" % (job['target_path'], job['target_name'])
        logger.info("Executing '%s'" % cmd)
        p = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Give it a few seconds then report the output
        time.sleep(3)
        job['output'] = self.non_block_read(p.stdout)
        job['err'] = self.non_block_read(p.stderr)
| [
"ripple.logger.info",
"subprocess.Popen",
"time.sleep",
"fcntl.fcntl"
] | [((291, 321), 'fcntl.fcntl', 'fcntl.fcntl', (['fd', 'fcntl.F_GETFL'], {}), '(fd, fcntl.F_GETFL)\n', (302, 321), False, 'import fcntl\n'), ((330, 380), 'fcntl.fcntl', 'fcntl.fcntl', (['fd', 'fcntl.F_SETFL', '(fl | os.O_NONBLOCK)'], {}), '(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n', (341, 380), False, 'import fcntl\n'), ((955, 993), 'ripple.logger.info', 'logger.info', (['"""In slurm submit command"""'], {}), "('In slurm submit command')\n", (966, 993), False, 'from ripple import logger\n'), ((1078, 1113), 'ripple.logger.info', 'logger.info', (['("Executing \'%s\'" % cmd)'], {}), '("Executing \'%s\'" % cmd)\n', (1089, 1113), False, 'from ripple import logger\n'), ((1126, 1214), 'subprocess.Popen', 'subprocess.Popen', (['[cmd]'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '([cmd], shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n', (1142, 1214), False, 'import subprocess\n'), ((1302, 1315), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1312, 1315), False, 'import time\n')] |
from __future__ import print_function
def one(a=123, b='234', c={'3': [4, '5']}):
    # Trace fixture: the mutable default ``c`` and the local rebinding are
    # deliberate so the tracer can observe variable side effects.
    for i in range(1): # one
        a = b = c['side'] = 'effect'
    two()
def two(a=123, b='234', c={'3': [4, '5']}):
    # Trace fixture: mirrors one(); mutable-default mutation is intentional.
    for i in range(1): # two
        a = b = c['side'] = 'effect'
    three()
def three(a=123, b='234', c={'3': [4, '5']}):
    # Trace fixture: mirrors one(); mutable-default mutation is intentional.
    for i in range(1): # three
        a = b = c['side'] = 'effect'
    four()
def four(a=123, b='234', c={'3': [4, '5']}):
    # Trace fixture: mirrors one(); mutable-default mutation is intentional.
    for i in range(1): # four
        a = b = c['side'] = 'effect'
    five()
def five(a=123, b='234', c={'3': [4, '5']}):
    # Trace fixture: the deepest traced frame. Calls six() three times and
    # mutates several names (including the shared mutable default ``c``)
    # before returning from inside the loop.
    six()
    six()
    six()
    a = b = c['side'] = in_five = 'effect'
    for i in range(1): # five
        return i # five
def six():
    # Intentionally empty helper; excluded from the trace by the
    # ~Q(function='six') filter in the __main__ block.
    pass
if __name__ == "__main__":
    from hunter import *
    from utils import DebugCallPrinter
    # Trace calls around ``five`` with a 15-frame backlog (capturing vars),
    # filtering out the noisy ``six`` helper.
    trace(
        Backlog(stack=15, vars=True, action=DebugCallPrinter(' [' 'backlog' ']'), function='five').filter(~Q(function='six')),
        action=DebugCallPrinter
    )
    one()
    one() # make sure Backlog is reusable (doesn't have storage side-effects)
    stop()
| [
"utils.DebugCallPrinter"
] | [((904, 934), 'utils.DebugCallPrinter', 'DebugCallPrinter', (['""" [backlog]"""'], {}), "(' [backlog]')\n", (920, 934), False, 'from utils import DebugCallPrinter\n')] |
#!/usr/bin/env python
"""Basic operations on dataframes.

Reference notes only -- ``df`` is never defined, so this module is not
meant to be executed.
"""
import pandas as pd
# This script isn't for execution, just for notes
# explore the shape and contents of a dataframe
df.describe()
df.info()
df.shape
df.columns
df.index
df.head(10)
df.tail(10)
type(df)
# <class 'pandas.core.frame.DataFrame'>
type(df['a'])
# <class 'pandas.core.series.Series'>
type(df['a'].values)
# <class 'numpy.ndarray'>
# copy a dataframe
df2 = df.copy()
# create a new index column
df.reset_index()
# Drop any row containing 'NaN'
df.dropna()
# double brackets returns dataframe
df[['a']]
df[['a', 'c']]
# single brackets returns a Series
series = df["a"]
# iloc uses numbers, 0 based indexing
# [row, column]
df.iloc[:5, :] # first 5 rows, all columns
df.iloc[-5:, :] # last 5 rows, all columns
df.iloc[:, 1] # all rows, second column
# loc uses text labels
df.loc[:, 'b':] # all rows, columns 'b' onwards
df.loc[3, 'a'] # row 4 from column 'a'
# Filter on a multi level index
#        a
# one 0  1
#     1  2
#     2  3
# two 0  4
#     1  5
#     2  6
df.loc['one']
#    a
# 0  1
# 1  2
# 2  3
# Filter on inner label
idx = pd.IndexSlice
df.loc[idx[:, 1], :]
#        a
# one 1  2
# two 1  5
# Convert df column type into a data type
df['a'].astype(int)
# Coerce the column into a numeric type
df['number'] = pd.to_numeric(df['number'], errors='coerce')
# Apply a lambda function over each row in a df
df['d'] = df['a'].apply(lambda x: x+1)
df['d'] = df.apply(lambda x: x['a']+1, axis=1)
# Drop duplicates
df = df.drop_duplicates()
# Calculate statistics for a column
df['a'].median()
df['a'].std()
df['a'].mean()
df['a'].mode()
# Fill missing values in a column
df['a'] = df.a.fillna(0)
# Count non null values
df['a'].count()
# Print the 5th and 95th percentiles
df.quantile([0.05, 0.95])
# rolling window across data
# Given
#    0
# 0  0
# 1  1
# 2  2
# 3  3
# 4  4
# 5  5
# 6  6
# 7  7
# 8  8
# 9  9
df.rolling(5).max()
#      0
# 0  NaN
# 1  NaN
# 2  NaN
# 3  NaN
# 4  4.0
# 5  5.0
# 6  6.0
# 7  7.0
# 8  8.0
# 9  9.0
df.rolling(5).mean()
#      0
# 0  NaN
# 1  NaN
# 2  NaN
# 3  NaN
# 4  2.0 (0+1+2+3+4)/5 = 2
# 5  3.0
# 6  4.0
# 7  5.0
# 8  6.0
# 9  7.0
# Sort values based on index
df.sort_index()
df.sort_index(ascending=False)
# Sort values based on column
df.sort_values('Max TemperatureF')
# reindex sorts the index according to a list
#    a
# 0  0
# 1  1
# 2  2
# 3  3
# 4  4
df.reindex([4, 3, 2, 1, 0])
#    a
# 4  4
# 3  3
# 2  2
# 1  1
# 0  0
# ffill adds values if the new list contains indexes not in the dataframe
df.reindex([0, 1, 2, 3, 4, 5, 6, 7]).ffill()
#      a
# 0  0.0
# 1  1.0
# 2  2.0
# 3  3.0
# 4  4.0
# 5  4.0
# 6  4.0
# 7  4.0
| [
"pandas.to_numeric"
] | [((1341, 1385), 'pandas.to_numeric', 'pd.to_numeric', (["df['number']"], {'errors': '"""coerce"""'}), "(df['number'], errors='coerce')\n", (1354, 1385), True, 'import pandas as pd\n')] |
import numpy as np
def permutation_value(num_list, max_num):
    """Interpret *num_list* as the digits of a base-``max_num`` number.

    The first element is the most significant digit, e.g.
    ``permutation_value([1, 2, 3], 10) == 123``.  Returns 0 for an
    empty list.
    """
    # enumerate() replaces indexing via range(len(...)); len() is hoisted
    # instead of being recomputed for every digit.
    n = len(num_list)
    return sum(digit * max_num ** (n - 1 - i) for i, digit in enumerate(num_list))
def permutation_update(in_list):
    # TODO: not yet implemented -- placeholder stub.
    pass
init_array = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
init_array.astype('int64')
#ULØST!
| [
"numpy.array"
] | [((222, 262), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (230, 262), True, 'import numpy as np\n')] |
import sys
from PyQt4 import QtGui, QtCore
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import numpy
from math import sqrt, sin, cos, pi
from geometry.quaternion import Quaternion
class StripChart(QtGui.QWidget):
    """ a class to implement a stripchart using the pyqtgraph plotting
    utilities

    Each of the ``dim`` series keeps at most ``max_pts`` samples; once a
    series is full, the oldest sample is dropped for each new one
    (scrolling window).
    """
    def __init__(self, plt=None, dim=1, relative_time=False, max_pts=200):
        """Create a stripchart drawing into an existing pyqtgraph plot.

        plt -- pyqtgraph plot item to draw into
        dim -- number of independent data series
        relative_time -- if True, x data are plotted relative to the
            newest timestamp seen across all series
        max_pts -- maximum number of points retained per series
        """
        super(StripChart, self).__init__()
        self.plt = plt
        self.curve = []
        self.xdata = []
        self.ydata = []
        for i in range(0,dim):
            self.curve.append(plt.plot(pen='w'))
            self.ydata.append([])
            self.xdata.append([])
        self.dim = dim
        self.max_pts = max_pts
        self._npts = [0,] * dim
        self.pens = [None,] * dim
        self.brushes = [None,] * dim
        self._use_relative_time = relative_time
    def _update_plot(self):
        """Push the stored series data into the pyqtgraph curves."""
        offset = 0.0
        if self._use_relative_time:
            # offset the x axis by the newest timestamp in any series
            for xd in self.xdata:
                if len(xd) == 0:
                    continue
                offset = max(numpy.amax(numpy.array(xd)), offset)
        for xd,yd,c,i in zip(self.xdata, self.ydata, self.curve, range(self.dim)):
            if numpy.isscalar(xd):
                xd = [xd,]
                yd = [yd,]
            plot_xdata = numpy.array(xd) - offset
            plot_ydata = numpy.array(yd)
            c.setData(x=plot_xdata, y=plot_ydata)
            # NOTE: removed dead locals (nbrush = 0 / npen = 0) that were
            # assigned but never read.
            if self.brushes is not None:
                assert len(self.brushes) == self.dim, "Number of brush\
                    collections must match number of samples"
                if self.brushes[i] is not None:
                    c.setBrush(self.brushes[i])
            if self.pens is not None:
                assert len(self.pens) == self.dim, "Number of pens\
                    collections must match number of samples"
                if self.pens[i] is not None:
                    c.setPen(self.pens[i])
    def update_data(self, x_new, y_new, idx=None, brushes=None, pens=None):
        """Append new samples to the chart and redraw.

        x_new, y_new -- scalars (when dim == 1) or tuples of values, one
            per series index in ``idx``; NaN samples are silently skipped
        idx -- series indices matching the tuples (defaults to all series)
        brushes, pens -- optional per-series styling stored for the redraw
        """
        if idx is None:
            idx = range(0,self.dim)
        if self.dim == 1:
            if not isinstance(x_new, tuple):
                x_new = (x_new,)
            if not isinstance(y_new, tuple):
                y_new = (y_new,)
        for x,y,i in zip(x_new, y_new, idx):
            if self._npts[i] < self.max_pts:
                if numpy.isnan(x) or numpy.isnan(y):
                    continue
                self.xdata[i] = numpy.append(self.xdata[i], x)
                self.ydata[i] = numpy.append(self.ydata[i], y)
                self._npts[i] += 1
            else:
                # buffer is full: drop the oldest point for each new one
                if numpy.isnan(x) or numpy.isnan(y):
                    continue
                self.xdata[i] = numpy.append(self.xdata[i][1:], x)
                self.ydata[i] = numpy.append(self.ydata[i][1:], y)
        if brushes is None:
            brushes = [None,]*len(idx)
        if pens is None:
            pens = [None,]*len(idx)
        for b,p,i in zip(brushes, pens, idx):
            self.brushes[i] = b
            self.pens[i] = p
        self._update_plot()
class ImageDisplay(QtGui.QWidget):
    """Widget that pushes 2-D data into a pyqtgraph image view.

    Optionally maps scalar data onto an RGB ramp (see ``colorize``)
    before handing it to the view.
    """
    def __init__(self, img=None, img_data=None, is_colorize=False):
        super(ImageDisplay, self).__init__()
        self._img_view = img
        # Fall back to a tiny blank image when no data is supplied.
        self.img_data = img_data if img_data is not None else numpy.zeros((2, 2))
        self.cmax = 1.0
        self.cmin = 0.0
        self.is_colorize = is_colorize
    def update_data(self, img_new=None):
        """Store new image data and redraw the attached view."""
        if img_new is None or self._img_view is None:
            return
        self.img_data = img_new
        rendered = self.colorize() if self.is_colorize else self.img_data
        self._img_view.setImage(rendered)
    def colorize(self):
        """Map the scalar image onto an RGB ramp spanning [cmin, cmax]."""
        rows = self.img_data.shape[0]
        cols = self.img_data.shape[1]
        rgb = numpy.zeros((rows, cols, 3))
        span = self.cmax - self.cmin
        rgb[:, :, 0] = (self.img_data - self.cmin) / span
        rgb[:, :, 1] = 1 - abs(self.img_data / span)
        rgb[:, :, 2] = -(self.img_data - self.cmax) / span
        return rgb
class xyPlot(QtGui.QWidget):
    """ Plot Widget for x-y data

    Base class for ScatterPlot/LinePlot; subclasses populate
    ``self.curve`` with one pyqtgraph item per series.
    """
    def __init__(self, plt=None, dim=1, xlim=None, ylim=None):
        """plt: pyqtgraph plot item; dim: number of data series;
        xlim/ylim: optional fixed axis ranges as (min, max)."""
        super(xyPlot, self).__init__()
        self.plt = plt
        self.curve = []
        self.xdata = []
        self.ydata = []
        self.pens = [None,] * dim
        self.brushes = [None,] * dim
        self._xlim = xlim
        self._ylim = ylim
        self.dim = dim
        # Fix: ``size`` was previously set only inside update_data(), so
        # calling _update_plot() before any update raised AttributeError.
        self.size = None
    def _update_plot(self):
        """Push the stored series data into the pyqtgraph curves."""
        for xd,yd,c,i in zip(self.xdata, self.ydata, self.curve, range(self.dim)):
            if numpy.isscalar(xd):
                xd = [xd,]
                yd = [yd,]
            if self.size is not None:
                c.setData(x=xd, y=yd, size=self.size)
            else:
                c.setData(x=xd, y=yd)
            # NOTE: removed dead locals (nbrush = 0 / npen = 0) that were
            # assigned but never read.
            if self.brushes is not None:
                assert len(self.brushes) == self.dim, "Number of brush\
                    collections must match number of samples"
                if self.brushes[i] is not None:
                    c.setBrush(self.brushes[i])
            if self.pens is not None:
                assert len(self.pens) == self.dim, "Number of pens\
                    collections must match number of samples"
                if self.pens[i] is not None:
                    c.setPen(self.pens[i])
        if self._xlim:
            self.plt.setXRange(self._xlim[0], self._xlim[1])
        if self._ylim:
            self.plt.setYRange(self._ylim[0], self._ylim[1])
    def update_data(self, x_new, y_new, curve_index=None, auto_update=True,
        brushes=None, pens=None, size=None):
        """Update the xy plot

        Arguments:
            x_new: new x data to update. Must either be a numpy array or a tuple
                of numpy arrays
            y_new: new y data to update. Must either be a numpy array or a tuple
                of numpy arrays
            curve_index: tuple of indices which indicate the curves which the
                tuples in x_new and y_new should update
            auto_update: optional, boolean indicating if we should redraw the
                plot, defaults to True
            brushes: tuple of brushes corresponding to the data to update
            pens: tuple of pens corresponding to the data to update
            size: marker size forwarded to setData (stored on the instance)

        Returns:
            no returns
        """
        assert type(x_new) is type(y_new), "x and y data must either be\
            numpy arrays or tuples containing them"
        if type(x_new) is not tuple:
            assert self.dim == 1, "must specify tuple of data if there is\
                more than one data series"
            x_new = (x_new,)
            y_new = (y_new,)
            curve_index = (0,)
        assert curve_index is not None, "must specify the data series that\
            correspond to data in x_new and y_new tuples"
        if brushes is None:
            brushes = [None,]*len(curve_index)
        if pens is None:
            pens = [None,]*len(curve_index)
        for xd,yd,i,b,p in zip(x_new, y_new, curve_index, brushes, pens):
            self.xdata[i] = xd
            self.ydata[i] = yd
            if b is not None:
                self.brushes[i] = b
            if p is not None:
                self.pens[i] = p
        self.size = size
        if auto_update:
            self._update_plot()
class ScatterPlot(xyPlot):
    """Scatter-plot widget: one pg.ScatterPlotItem per data series.

    All data handling is inherited from xyPlot.
    """
    def __init__(self, plt=None, dim=1):
        super(ScatterPlot, self).__init__(plt, dim)
        for _ in range(self.dim):
            item = pg.ScatterPlotItem(pen='w')
            self.curve.append(item)
            plt.addItem(item)
            self.ydata.append(numpy.zeros((1,)))
            self.xdata.append(numpy.zeros((1,)))
class LinePlot(xyPlot):
    """Line-plot widget: one pg.PlotCurveItem per data series.

    All data handling is inherited from xyPlot.
    """
    def __init__(self, plt=None, dim=1, xlim=None, ylim=None):
        super(LinePlot, self).__init__(plt, dim, xlim, ylim)
        for _ in range(self.dim):
            item = pg.PlotCurveItem(pen='w')
            self.curve.append(item)
            plt.addItem(item)
            self.ydata.append(numpy.zeros((1,)))
            self.xdata.append(numpy.zeros((1,)))
| [
"pyqtgraph.PlotCurveItem",
"numpy.isscalar",
"pyqtgraph.ScatterPlotItem",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.isnan"
] | [((4031, 4061), 'numpy.zeros', 'numpy.zeros', (['(len_x, len_y, 3)'], {}), '((len_x, len_y, 3))\n', (4042, 4061), False, 'import numpy\n'), ((1242, 1260), 'numpy.isscalar', 'numpy.isscalar', (['xd'], {}), '(xd)\n', (1256, 1260), False, 'import numpy\n'), ((1392, 1407), 'numpy.array', 'numpy.array', (['yd'], {}), '(yd)\n', (1403, 1407), False, 'import numpy\n'), ((3516, 3535), 'numpy.zeros', 'numpy.zeros', (['(2, 2)'], {}), '((2, 2))\n', (3527, 3535), False, 'import numpy\n'), ((4820, 4838), 'numpy.isscalar', 'numpy.isscalar', (['xd'], {}), '(xd)\n', (4834, 4838), False, 'import numpy\n'), ((1342, 1357), 'numpy.array', 'numpy.array', (['xd'], {}), '(xd)\n', (1353, 1357), False, 'import numpy\n'), ((2564, 2594), 'numpy.append', 'numpy.append', (['self.xdata[i]', 'x'], {}), '(self.xdata[i], x)\n', (2576, 2594), False, 'import numpy\n'), ((2627, 2657), 'numpy.append', 'numpy.append', (['self.ydata[i]', 'y'], {}), '(self.ydata[i], y)\n', (2639, 2657), False, 'import numpy\n'), ((2825, 2859), 'numpy.append', 'numpy.append', (['self.xdata[i][1:]', 'x'], {}), '(self.xdata[i][1:], x)\n', (2837, 2859), False, 'import numpy\n'), ((2892, 2926), 'numpy.append', 'numpy.append', (['self.ydata[i][1:]', 'y'], {}), '(self.ydata[i][1:], y)\n', (2904, 2926), False, 'import numpy\n'), ((7918, 7945), 'pyqtgraph.ScatterPlotItem', 'pg.ScatterPlotItem', ([], {'pen': '"""w"""'}), "(pen='w')\n", (7936, 7945), True, 'import pyqtgraph as pg\n'), ((8017, 8034), 'numpy.zeros', 'numpy.zeros', (['(1,)'], {}), '((1,))\n', (8028, 8034), False, 'import numpy\n'), ((8066, 8083), 'numpy.zeros', 'numpy.zeros', (['(1,)'], {}), '((1,))\n', (8077, 8083), False, 'import numpy\n'), ((8361, 8386), 'pyqtgraph.PlotCurveItem', 'pg.PlotCurveItem', ([], {'pen': '"""w"""'}), "(pen='w')\n", (8377, 8386), True, 'import pyqtgraph as pg\n'), ((8458, 8475), 'numpy.zeros', 'numpy.zeros', (['(1,)'], {}), '((1,))\n', (8469, 8475), False, 'import numpy\n'), ((8507, 8524), 'numpy.zeros', 'numpy.zeros', (['(1,)'], {}), 
'((1,))\n', (8518, 8524), False, 'import numpy\n'), ((2469, 2483), 'numpy.isnan', 'numpy.isnan', (['x'], {}), '(x)\n', (2480, 2483), False, 'import numpy\n'), ((2487, 2501), 'numpy.isnan', 'numpy.isnan', (['y'], {}), '(y)\n', (2498, 2501), False, 'import numpy\n'), ((2730, 2744), 'numpy.isnan', 'numpy.isnan', (['x'], {}), '(x)\n', (2741, 2744), False, 'import numpy\n'), ((2748, 2762), 'numpy.isnan', 'numpy.isnan', (['y'], {}), '(y)\n', (2759, 2762), False, 'import numpy\n'), ((1116, 1131), 'numpy.array', 'numpy.array', (['xd'], {}), '(xd)\n', (1127, 1131), False, 'import numpy\n')] |
from typing import Tuple, Dict
import h5py
import pandas as pd
import numpy as np
from loguru import logger
from ruamel.yaml import YAML
from joblib import load, dump
from umda import EmbeddingModel
from sklearn.gaussian_process import GaussianProcessRegressor, kernels
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import KFold, GridSearchCV, ShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
# Toggle for the (currently commented-out) dask execution path in main().
USE_DASK = False
# Registry of candidate regressors for the grid search:
#   name -> [estimator instance, grid-search parameter grid]
# The "gpr" estimator is None because the kernel and the
# GaussianProcessRegressor itself are constructed inside main().
models = {
    "linear_regression": [
        LinearRegression(fit_intercept=False),
        [{"normalize": [True, False],}],
    ],
    "svr": [
        SVR(),
        [
            {
                "kernel": ["rbf",],# "poly"],
                #"degree": [2, 3, 4],
                "C": 10**np.linspace(1.5, 2., 20),
                "gamma": ["auto", 0.05, 0.1],
                "epsilon": 10**np.linspace(-3., -1., 20),
            }
        ],
    ],
    "knn": [
        KNeighborsRegressor(),
        [
            {
                "n_neighbors": [2, 4, 10, 15, 30, 50, 70],
                "metric": ["cosine", "euclidean",],
                "weights": ["uniform", "distance"]
            }
        ],
    ],
    "rfr": [
        RandomForestRegressor(max_features=None, random_state=1205),
        [
            {"n_estimators": [10, 20, 50, 80, 100, 125, 150, 200],
            "max_leaf_nodes": [None, 5, 10, 15, 20, 40],
            "min_samples_leaf": [1, 3, 5, 10, 15, 20, 25, 35],
            "max_depth": [None, 5, 10, 15, 20]
            }
        ],
    ],
    "gbr": [
        GradientBoostingRegressor(random_state=1205),
        [
            {
                "learning_rate": 10 ** np.linspace(-3.0, 1.0, 20),
                "n_estimators": [5, 10, 30, 50, 80, 100, 125, 150, 200],
                "subsample": [0.2, 0.4, 0.6, 0.8, 1.],
                "max_depth": [1, 2, 3, 4, 5, 6]
            }
        ],
    ],
    "gpr": [
        None,
        [{"alpha": 10 ** np.linspace(-10.0, 1.0, 5), "n_restarts_optimizer": [5, 10, 15, 20]}],
    ],
}
def standardize_test(
    estimator: "sklearn model",
    search_params: Tuple[Dict],
    data: Tuple[np.ndarray, np.ndarray],
    seed: int = 42,
    n_jobs: int = 8,
    cv: int = 20
):
    """Grid-search *estimator* over *search_params* with k-fold CV.

    Runs a ``GridSearchCV`` with ``cv`` shuffled folds, scoring by
    negative mean squared error, then reports the best estimator's
    errors on the full data set.  ``masked_mse`` excludes the samples
    whose target is exactly zero (the injected dummy examples).

    Returns the fitted grid search, the best estimator, and a dict with
    ``mse``, ``masked_mse`` and ``r^2``.
    """
    features, targets = data
    folds = KFold(cv, random_state=seed, shuffle=True)
    grid_search = GridSearchCV(
        estimator,
        search_params,
        scoring="neg_mean_squared_error",
        cv=folds,
        n_jobs=n_jobs,
    )
    grid_search.fit(features, targets)
    best = grid_search.best_estimator_
    predicted = best.predict(features)
    nonzero = targets != 0.0
    errors = {
        "mse": float(metrics.mean_squared_error(predicted, targets)),
        "masked_mse": float(
            metrics.mean_squared_error(predicted[nonzero], targets[nonzero])
        ),
        "r^2": float(metrics.r2_score(targets, predicted)),
    }
    return grid_search, best, errors
def mask_distant_species(
    target: np.ndarray, fullset: np.ndarray, upper_percentile: float = 97.
) -> np.ndarray:
    """Flag members of *fullset* that are far from every *target* vector.

    Computes pairwise cosine distances between the two embedding sets,
    logs summary statistics, and returns a boolean mask over *fullset*
    marking entries whose mean distance to the targets exceeds the
    *upper_percentile* percentile of all pairwise distances.
    """
    distances = cosine_distances(target, fullset)
    logger.info(f"Min/max distance: {distances.min()}/{distances.max()}")
    logger.info(f"Mean/std distance: {distances.mean()}/{distances.std()}")
    lower, mean, upper = np.percentile(distances, [3., 50., upper_percentile])
    logger.info(f"3%/50%/{upper_percentile}%: {lower:.3f}/{mean:.3f}/{upper:.3f}")
    # mean over targets -> one value per fullset member
    return distances.mean(axis=0) > upper
def main(
    prediction_output: str,
    seed: int = 42,
    distance_threshold: float = 0.8,
    n_jobs: int = 8,
    cv: int = 10
):
    """Train every model in ``models`` on the TMC-1 column densities.

    Loads precomputed embeddings, builds a training set of TMC-1
    detections (log10 column densities) plus zero-valued "dummy"
    examples for molecules far from TMC-1 (see mask_distant_species),
    grid-searches each model, and caches predictions into
    *prediction_output* (HDF5) and summary CSV/YAML files.

    prediction_output -- path of the HDF5 file to write predictions into
    seed -- RNG seed forwarded to the CV splitter
    distance_threshold -- forwarded to mask_distant_species as its
        percentile cutoff (callers pass e.g. 99.98)
    n_jobs -- parallel workers for the grid search
    cv -- number of cross-validation folds
    """
    logger.add("model_training.log")
    logger.info(f"Using seed {seed}, cosine distance zeroing: {distance_threshold}")
    logger.info(f"Cross-validation will be done with {n_jobs} workers.")
    #rng = np.random.default_rng(seed)
    logger.info("Loading data")
    # prepare and load data
    data = h5py.File("../data/processed/pipeline_embeddings_70.h5", "r")
    original = h5py.File("../data/processed/smiles_embeddings_300.h5", "r")
    pipeline = load("../models/embedding_pipeline.pkl")
    pca = load("../models/pca_model.pkl")
    embedding_model = load("../models/EmbeddingModel.pkl")
    ## load in the TMC-1 data and grab the embedding vectors
    tmc1_df = pd.read_pickle("../data/processed/tmc1_ready.pkl")
    # ignore H2 lol
    #tmc1_df = tmc1_df.loc[tmc1_df["canonical"] != "[HH]"]
    tmc1_df.reset_index(inplace=True, drop=True)
    ## get into NumPy array
    #tmc1_vecs = np.vstack(tmc1_df["vectors"])
    ##indices = np.arange(len(data["pca"]))
    #for step in pipeline.steps[:2]:
    #    tmc1_vecs = step[1].transform(tmc1_vecs)
    ## get the TMC-1 cluster IDs
    #tmc1_cluster_ids = pipeline.steps[-1][1].predict(tmc1_vecs)
    tmc1_vecs = np.vstack([embedding_model.vectorize(smi) for smi in tmc1_df["canonical"]])
    tmc1_cluster_ids = np.array([embedding_model.cluster(smi) for smi in tmc1_df["canonical"]])
    #if USE_DASK:
    #    tmc1_cluster_ids = tmc1_cluster_ids.compute()
    ## holdout_cluster_ids = pipeline.predict(holdout_vecs).compute()
    ## compute the PCA embedding for the TMC-1 molecules
    #tmc1_embedding = pipeline.steps[0][1].transform(tmc1_vecs)
    # holdout_embedding = pipeline.steps[0][1].transform(holdout_vecs)
    # for computational efficiency, just grab the most relevant
    # molecules to TMC-1
    mask = np.zeros_like(data["cluster_ids"], dtype=bool)
    for i in np.unique(tmc1_cluster_ids):
        mask += data["cluster_ids"][:] == i
    logger.info(f"There are {mask.sum()} molecules in the TMC-1 cluster(s)")
    # Extract out the molecules contained within our cluster
    all_pca = (data["pca"][:])[mask, :]
    logger.info(f"Shape of the PCA vectors: {all_pca.shape}")
    logger.info(f"Shape of the TMC1-1 vectors: {tmc1_vecs.shape}")
    pca_dim = all_pca.shape[-1]
    # subset_smiles = (data["smiles"][:])[mask]
    # set them as "X" and "Y" for ease of reference
    X = tmc1_vecs.copy()
    Y = np.log10(tmc1_df["Column density (cm^-2)"].to_numpy())
    # convert to abundance
    #Y = tmc1_df["Column density (cm^-2)"].to_numpy() / 1e22
    # what we want to do now is to set molecules we have little chance of
    # detecting to have zero column densities
    dist_mask = mask_distant_species(X, all_pca, distance_threshold)
    dummies = all_pca[dist_mask,:]
    logger.info(f"Setting {dist_mask.sum()} entries to zero column density.")
    # logger.info(f"Examples of excluded molecules: {subset_smiles[dist_mask][:5]}")
    dummy_y = np.zeros(dummies.shape[0])
    logger.info("Preparing training data")
    # add the constrained values to our training data
    train_x = np.vstack([X, dummies])
    train_y = np.hstack([Y, dummy_y])
    logger.info(f"Shape of X: {train_x.shape} and Y: {train_y.shape}")
    results = dict()
    with h5py.File(prediction_output, "a") as h5_output:
        try:
            del h5_output["tmc1_cluster_mask"]
        # Fix: narrowed from a bare ``except`` for consistency with the
        # other delete-guards below; only a missing key should be ignored.
        except KeyError:
            pass
        # save the intercluster mask
        h5_output["tmc1_cluster_mask"] = mask
        # now do the standardized training and testing for every model
        for model_name, conditions in models.items():
            # see if we can delete the key
            try:
                del h5_output[model_name]
            except KeyError:
                pass
            logger.info(f"Performing {cv}-fold CV on {model_name}")
            model, hyperparams = conditions
            # for gaussian process, define the covariance function
            if model_name == "gpr":
                kernel = kernels.ConstantKernel() * kernels.RBF(
                    3.0, (1e-1, 10.0)
                ) + kernels.RationalQuadratic(
                    200.0, 20.0, alpha_bounds=(1e-3, 5e2), length_scale_bounds=(50.0, 1e4)
                ) * kernels.ConstantKernel() + kernels.ConstantKernel()
                model = GaussianProcessRegressor(kernel, random_state=1205)
            grid, best_model, errors = standardize_test(
                model, hyperparams, (train_x, train_y), n_jobs=n_jobs, cv=cv, seed=seed
            )
            # log the model results
            results[model_name] = errors
            logger.info(f"Best errors for {model_name}: {errors}")
            # pickle the CV grid
            dump(grid, f"../models/{model_name}_grid.pkl")
            cv_df = pd.DataFrame.from_dict(grid.cv_results_)
            cv_df.to_csv(f"../models/{model_name}_grid_summary.csv", index=False)
            logger.info(f"Caching predictions for best model")
            if model_name != "gpr":
                pred_Y = best_model.predict(all_pca)
                h5_output[f"{model_name}"] = pred_Y
            else:
                pred_Y, pred_std = best_model.predict(all_pca, return_std=True)
                gpr_tmc_y, gpr_tmc_cov = best_model.predict(
                    X, return_cov=True
                )
                # save a bunch of stuff for Gaussian Process
                for target, name in zip(
                    [pred_Y, pred_std, gpr_tmc_y, gpr_tmc_cov],
                    ["all", "all_std", "tmc_reproduction", "tmc_cov"],
                ):
                    try:
                        del h5_output[f"{model_name}_{name}"]
                    except KeyError:
                        pass
                    h5_output[f"{model_name}_{name}"] = target
            tmc1_df[model_name] = best_model.predict(X)
    tmc1_df.to_csv("tmc1_results.csv", index=False)
    # save the errors for later reporting
    yaml = YAML()
    with open("../models/training_errors.yml", "w+") as write_file:
        yaml.dump(results, write_file)
if __name__ == "__main__":
    # Runtime configuration forwarded to main() as keyword arguments.
    params = {
        "prediction_output": "../data/processed/model_predictions.h5",
        "seed": 42,
        # NOTE(review): this value is forwarded to mask_distant_species as
        # a *percentile* (99.98%), although main()'s default of 0.8 reads
        # like a raw cosine distance -- confirm the intended units.
        "distance_threshold": 99.98,
        "n_jobs": 16,
        "cv": 10
    }
    main(**params)
| [
"sklearn.model_selection.GridSearchCV",
"sklearn.metrics.pairwise.cosine_distances",
"numpy.hstack",
"ruamel.yaml.YAML",
"sklearn.model_selection.KFold",
"sklearn.metrics.r2_score",
"pandas.read_pickle",
"loguru.logger.add",
"sklearn.gaussian_process.GaussianProcessRegressor",
"sklearn.ensemble.Ra... | [((2628, 2670), 'sklearn.model_selection.KFold', 'KFold', (['cv'], {'random_state': 'seed', 'shuffle': '(True)'}), '(cv, random_state=seed, shuffle=True)\n', (2633, 2670), False, 'from sklearn.model_selection import KFold, GridSearchCV, ShuffleSplit\n'), ((2689, 2791), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['estimator', 'search_params'], {'scoring': '"""neg_mean_squared_error"""', 'cv': 'kfold', 'n_jobs': 'n_jobs'}), "(estimator, search_params, scoring='neg_mean_squared_error', cv\n =kfold, n_jobs=n_jobs)\n", (2701, 2791), False, 'from sklearn.model_selection import KFold, GridSearchCV, ShuffleSplit\n'), ((3055, 3092), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_pred', 'y'], {}), '(y_pred, y)\n', (3081, 3092), False, 'from sklearn import metrics\n'), ((3110, 3163), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_pred[y_mask]', 'y[y_mask]'], {}), '(y_pred[y_mask], y[y_mask])\n', (3136, 3163), False, 'from sklearn import metrics\n'), ((3173, 3200), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3189, 3200), False, 'from sklearn import metrics\n'), ((3485, 3518), 'sklearn.metrics.pairwise.cosine_distances', 'cosine_distances', (['target', 'fullset'], {}), '(target, fullset)\n', (3501, 3518), False, 'from sklearn.metrics.pairwise import cosine_distances, euclidean_distances\n'), ((3694, 3749), 'numpy.percentile', 'np.percentile', (['distances', '[3.0, 50.0, upper_percentile]'], {}), '(distances, [3.0, 50.0, upper_percentile])\n', (3707, 3749), True, 'import numpy as np\n'), ((3752, 3830), 'loguru.logger.info', 'logger.info', (['f"""3%/50%/{upper_percentile}%: {lower:.3f}/{mean:.3f}/{upper:.3f}"""'], {}), "(f'3%/50%/{upper_percentile}%: {lower:.3f}/{mean:.3f}/{upper:.3f}')\n", (3763, 3830), False, 'from loguru import logger\n'), ((4041, 4073), 'loguru.logger.add', 'logger.add', (['"""model_training.log"""'], {}), 
"('model_training.log')\n", (4051, 4073), False, 'from loguru import logger\n'), ((4078, 4163), 'loguru.logger.info', 'logger.info', (['f"""Using seed {seed}, cosine distance zeroing: {distance_threshold}"""'], {}), "(f'Using seed {seed}, cosine distance zeroing: {distance_threshold}'\n )\n", (4089, 4163), False, 'from loguru import logger\n'), ((4163, 4231), 'loguru.logger.info', 'logger.info', (['f"""Cross-validation will be done with {n_jobs} workers."""'], {}), "(f'Cross-validation will be done with {n_jobs} workers.')\n", (4174, 4231), False, 'from loguru import logger\n'), ((4275, 4302), 'loguru.logger.info', 'logger.info', (['"""Loading data"""'], {}), "('Loading data')\n", (4286, 4302), False, 'from loguru import logger\n'), ((4342, 4403), 'h5py.File', 'h5py.File', (['"""../data/processed/pipeline_embeddings_70.h5"""', '"""r"""'], {}), "('../data/processed/pipeline_embeddings_70.h5', 'r')\n", (4351, 4403), False, 'import h5py\n'), ((4419, 4479), 'h5py.File', 'h5py.File', (['"""../data/processed/smiles_embeddings_300.h5"""', '"""r"""'], {}), "('../data/processed/smiles_embeddings_300.h5', 'r')\n", (4428, 4479), False, 'import h5py\n'), ((4495, 4535), 'joblib.load', 'load', (['"""../models/embedding_pipeline.pkl"""'], {}), "('../models/embedding_pipeline.pkl')\n", (4499, 4535), False, 'from joblib import load, dump\n'), ((4546, 4577), 'joblib.load', 'load', (['"""../models/pca_model.pkl"""'], {}), "('../models/pca_model.pkl')\n", (4550, 4577), False, 'from joblib import load, dump\n'), ((4600, 4636), 'joblib.load', 'load', (['"""../models/EmbeddingModel.pkl"""'], {}), "('../models/EmbeddingModel.pkl')\n", (4604, 4636), False, 'from joblib import load, dump\n'), ((4712, 4762), 'pandas.read_pickle', 'pd.read_pickle', (['"""../data/processed/tmc1_ready.pkl"""'], {}), "('../data/processed/tmc1_ready.pkl')\n", (4726, 4762), True, 'import pandas as pd\n'), ((5818, 5864), 'numpy.zeros_like', 'np.zeros_like', (["data['cluster_ids']"], {'dtype': 'bool'}), 
"(data['cluster_ids'], dtype=bool)\n", (5831, 5864), True, 'import numpy as np\n'), ((5878, 5905), 'numpy.unique', 'np.unique', (['tmc1_cluster_ids'], {}), '(tmc1_cluster_ids)\n', (5887, 5905), True, 'import numpy as np\n'), ((6133, 6190), 'loguru.logger.info', 'logger.info', (['f"""Shape of the PCA vectors: {all_pca.shape}"""'], {}), "(f'Shape of the PCA vectors: {all_pca.shape}')\n", (6144, 6190), False, 'from loguru import logger\n'), ((6195, 6257), 'loguru.logger.info', 'logger.info', (['f"""Shape of the TMC1-1 vectors: {tmc1_vecs.shape}"""'], {}), "(f'Shape of the TMC1-1 vectors: {tmc1_vecs.shape}')\n", (6206, 6257), False, 'from loguru import logger\n'), ((6965, 6991), 'numpy.zeros', 'np.zeros', (['dummies.shape[0]'], {}), '(dummies.shape[0])\n', (6973, 6991), True, 'import numpy as np\n'), ((6996, 7034), 'loguru.logger.info', 'logger.info', (['"""Preparing training data"""'], {}), "('Preparing training data')\n", (7007, 7034), False, 'from loguru import logger\n'), ((7103, 7126), 'numpy.vstack', 'np.vstack', (['[X, dummies]'], {}), '([X, dummies])\n', (7112, 7126), True, 'import numpy as np\n'), ((7141, 7164), 'numpy.hstack', 'np.hstack', (['[Y, dummy_y]'], {}), '([Y, dummy_y])\n', (7150, 7164), True, 'import numpy as np\n'), ((7169, 7235), 'loguru.logger.info', 'logger.info', (['f"""Shape of X: {train_x.shape} and Y: {train_y.shape}"""'], {}), "(f'Shape of X: {train_x.shape} and Y: {train_y.shape}')\n", (7180, 7235), False, 'from loguru import logger\n'), ((9974, 9980), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (9978, 9980), False, 'from ruamel.yaml import YAML\n'), ((764, 801), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (780, 801), False, 'from sklearn.linear_model import LinearRegression\n'), ((872, 877), 'sklearn.svm.SVR', 'SVR', ([], {}), '()\n', (875, 877), False, 'from sklearn.svm import SVR\n'), ((1195, 1216), 'sklearn.neighbors.KNeighborsRegressor', 
'KNeighborsRegressor', ([], {}), '()\n', (1214, 1216), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((1457, 1516), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'max_features': 'None', 'random_state': '(1205)'}), '(max_features=None, random_state=1205)\n', (1478, 1516), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((1819, 1863), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'random_state': '(1205)'}), '(random_state=1205)\n', (1844, 1863), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((7266, 7299), 'h5py.File', 'h5py.File', (['prediction_output', '"""a"""'], {}), "(prediction_output, 'a')\n", (7275, 7299), False, 'import h5py\n'), ((7779, 7834), 'loguru.logger.info', 'logger.info', (['f"""Performing {cv}-fold CV on {model_name}"""'], {}), "(f'Performing {cv}-fold CV on {model_name}')\n", (7790, 7834), False, 'from loguru import logger\n'), ((8623, 8677), 'loguru.logger.info', 'logger.info', (['f"""Best errors for {model_name}: {errors}"""'], {}), "(f'Best errors for {model_name}: {errors}')\n", (8634, 8677), False, 'from loguru import logger\n'), ((8723, 8769), 'joblib.dump', 'dump', (['grid', 'f"""../models/{model_name}_grid.pkl"""'], {}), "(grid, f'../models/{model_name}_grid.pkl')\n", (8727, 8769), False, 'from joblib import load, dump\n'), ((8790, 8830), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['grid.cv_results_'], {}), '(grid.cv_results_)\n', (8812, 8830), True, 'import pandas as pd\n'), ((8925, 8975), 'loguru.logger.info', 'logger.info', (['f"""Caching predictions for best model"""'], {}), "(f'Caching predictions for best model')\n", (8936, 8975), False, 'from loguru import logger\n'), ((8323, 8374), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', (['kernel'], {'random_state': '(1205)'}), '(kernel, random_state=1205)\n', (8347, 8374), False, 
'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n'), ((1012, 1037), 'numpy.linspace', 'np.linspace', (['(1.5)', '(2.0)', '(20)'], {}), '(1.5, 2.0, 20)\n', (1023, 1037), True, 'import numpy as np\n'), ((1115, 1142), 'numpy.linspace', 'np.linspace', (['(-3.0)', '(-1.0)', '(20)'], {}), '(-3.0, -1.0, 20)\n', (1126, 1142), True, 'import numpy as np\n'), ((1928, 1954), 'numpy.linspace', 'np.linspace', (['(-3.0)', '(1.0)', '(20)'], {}), '(-3.0, 1.0, 20)\n', (1939, 1954), True, 'import numpy as np\n'), ((2217, 2243), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(1.0)', '(5)'], {}), '(-10.0, 1.0, 5)\n', (2228, 2243), True, 'import numpy as np\n'), ((8274, 8298), 'sklearn.gaussian_process.kernels.ConstantKernel', 'kernels.ConstantKernel', ([], {}), '()\n', (8296, 8298), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n'), ((8007, 8031), 'sklearn.gaussian_process.kernels.ConstantKernel', 'kernels.ConstantKernel', ([], {}), '()\n', (8029, 8031), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n'), ((8034, 8063), 'sklearn.gaussian_process.kernels.RBF', 'kernels.RBF', (['(3.0)', '(0.1, 10.0)'], {}), '(3.0, (0.1, 10.0))\n', (8045, 8063), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n'), ((8105, 8213), 'sklearn.gaussian_process.kernels.RationalQuadratic', 'kernels.RationalQuadratic', (['(200.0)', '(20.0)'], {'alpha_bounds': '(0.001, 500.0)', 'length_scale_bounds': '(50.0, 10000.0)'}), '(200.0, 20.0, alpha_bounds=(0.001, 500.0),\n length_scale_bounds=(50.0, 10000.0))\n', (8130, 8213), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n'), ((8247, 8271), 'sklearn.gaussian_process.kernels.ConstantKernel', 'kernels.ConstantKernel', ([], {}), '()\n', (8269, 8271), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n')] |
"""Module that allows QE to interface with cephadm bootstrap CLI."""
import logging
from typing import Dict
from ceph.ceph import ResourceNotFoundError
from utility.utils import get_cephci_config
from .common import config_dict_to_string
from .typing_ import CephAdmProtocol
logger = logging.getLogger(__name__)
class BootstrapMixin:
    """Add bootstrap support to the child class."""

    def bootstrap(self: CephAdmProtocol, config: Dict) -> None:
        """
        Execute cephadm bootstrap with the passed kwargs on the installer node.
        Bootstrap involves,
        - Creates /etc/ceph directory with permissions
        - CLI creation with bootstrap options with custom/default image
        - Execution of bootstrap command
        Args:
            config: Key/value pairs passed from the test case.
            Example:
                config:
                    command: bootstrap
                    base_cmd_args:
                        verbose: true
                    args:
                        custom_image: true | false
                        mon-ip: <node_name>
                        mgr-id: <mgr_id>
                        fsid: <id>
        Raises:
            ResourceNotFoundError: when the given mon-ip node name does not
                match any node of the cluster.
        """
        # Prepare the installer node: passwordless SSH, tool repo, cephadm rpm.
        self.cluster.setup_ssh_keys()
        self.set_tool_repo()
        self.install()
        # Registry credentials come from the cephci configuration file.
        cdn_cred = get_cephci_config().get("cdn_credentials")
        cmd = "cephadm"
        if config.get("base_cmd_args"):
            cmd += config_dict_to_string(config["base_cmd_args"])
        args = config.get("args")
        custom_image = args.pop("custom_image", True)
        if custom_image:
            cmd += f" --image {self.config['container_image']}"
        cmd += " bootstrap"
        # NOTE(review): the registry arguments below are appended
        # unconditionally, i.e. even when custom_image is False — confirm
        # this is intentional.
        custom_image_args = (
            " --registry-url registry.redhat.io"
            " --registry-username {user}"
            " --registry-password {password}"
        )
        cmd += custom_image_args.format(
            user=cdn_cred.get("username"),
            password=cdn_cred.get("password"),
        )
        # To be generic, the mon-ip contains the global node name. Here, we replace the
        # name with the IP address. The replacement allows us to be inline with the
        # CLI option.
        # Todo: need to switch installer node on any other node name provided
        # other than installer node
        mon_node = args.pop("mon-ip", self.installer.node.shortname)
        if mon_node:
            for node in self.cluster.get_nodes():
                if mon_node in node.shortname:
                    cmd += f" --mon-ip {node.ip_address}"
                    break
            else:
                # for/else: no cluster node matched the given shortname
                raise ResourceNotFoundError(f"Unknown {mon_node} node name.")
        cmd += config_dict_to_string(args)
        out, err = self.installer.exec_command(
            sudo=True,
            cmd=cmd,
            timeout=1800,
            check_ec=True,
        )
        logger.info("Bootstrap output : %s", out.read().decode())
        # NOTE(review): stderr is logged at ERROR level even when the command
        # succeeded (check_ec=True) — confirm this is intentional.
        logger.error("Bootstrap error: %s", err.read().decode())
        self.distribute_cephadm_gen_pub_key()
| [
"logging.getLogger",
"utility.utils.get_cephci_config",
"ceph.ceph.ResourceNotFoundError"
] | [((287, 314), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (304, 314), False, 'import logging\n'), ((1266, 1285), 'utility.utils.get_cephci_config', 'get_cephci_config', ([], {}), '()\n', (1283, 1285), False, 'from utility.utils import get_cephci_config\n'), ((2593, 2648), 'ceph.ceph.ResourceNotFoundError', 'ResourceNotFoundError', (['f"""Unknown {mon_node} node name."""'], {}), "(f'Unknown {mon_node} node name.')\n", (2614, 2648), False, 'from ceph.ceph import ResourceNotFoundError\n')] |
from subprocess import call
from os import path
class Task:
    """A single installation step (action/source/target) parsed from config."""

    def __init__(self, data, pkg_dir):
        """Pull the action/source/target triple out of *data*."""
        self.action = data["action"]
        self.source = data["source"]
        self.target = data["target"]
        self.pkg_dir = pkg_dir

    def run(self, command, test_mode=False):
        """Execute *command*; in test mode nothing runs and False is returned."""
        if test_mode:
            return False
        return call(command) == 0

    def present(self, target):
        """Return a human-readable description of what this task would do."""
        return "[{:s}] {:s} -> {:s}".format(self.action, self.source, self.lookup(target))

    def lookup(self, target):
        """Resolve the task's target path, under *target* or under $HOME.

        NOTE: str.strip("~/") removes any leading/trailing '~' and '/'
        characters (a character set, not the literal prefix).
        """
        if target is None:
            return path.expanduser(self.target)
        trimmed = self.target.strip("~/")
        return path.join(target.rstrip("/"), trimmed.lstrip("/"))
class LinkTask(Task):
    """Task that symlinks a file from the package directory into the target."""

    def run(self, target, test_mode=False):
        """Create the symlink via ``ln -s``; no-op (False) in test mode."""
        link_source = path.join(path.abspath(self.pkg_dir), self.source)
        link_target = super().lookup(target)
        return super().run(["ln", "-s", link_source, link_target], test_mode)
class MkdirTask(Task):
    """Task that creates the target directory (including parents)."""

    def run(self, target, test_mode=False):
        """Create the directory via ``mkdir -p``; no-op (False) in test mode."""
        directory = super().lookup(target)
        return super().run(["mkdir", "-p", directory], test_mode)
| [
"os.path.abspath",
"subprocess.call",
"os.path.expanduser"
] | [((320, 333), 'subprocess.call', 'call', (['command'], {}), '(command)\n', (324, 333), False, 'from subprocess import call\n'), ((539, 567), 'os.path.expanduser', 'path.expanduser', (['self.target'], {}), '(self.target)\n', (554, 567), False, 'from os import path\n'), ((773, 799), 'os.path.abspath', 'path.abspath', (['self.pkg_dir'], {}), '(self.pkg_dir)\n', (785, 799), False, 'from os import path\n')] |
def resolve():
    """Read N string pairs from stdin and print the LCS length of each pair.

    Input format: first line is N; then 2*N lines follow, each consecutive
    pair being the two strings to compare.
    """
    from sys import stdin
    N = int(input())
    def lcs(a, b):
        # Classic O(len(a)*len(b)) longest-common-subsequence DP, kept to
        # two rolling rows to save memory: dp1 is the previous row, dp2 the
        # row currently being filled (updated in place).
        num_a = len(a)
        num_b = len(b)
        dp2 = [0]*(num_b+1)
        for i in range(num_a):
            dp1 = dp2[:]
            for j in range(num_b):
                # dp2[j+1] = max(dp2[j], dp1[j+1])
                if dp2[j] >= dp1[j+1]: dp2[j+1] = dp2[j]
                else: dp2[j+1] = dp1[j+1]
                if a[i] == b[j]:
                    # take the diagonal value + 1 when it improves the cell
                    if dp2[j+1] >= dp1[j]+1: dp2[j+1] = dp2[j+1]
                    else: dp2[j+1] = dp1[j]+1
        return dp2[num_b]
    for _ in range(N):
        print(lcs(stdin.readline().strip(), stdin.readline().strip()))
if __name__ == "__main__":
    resolve()
| [
"sys.stdin.readline"
] | [((613, 629), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (627, 629), False, 'from sys import stdin\n'), ((639, 655), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (653, 655), False, 'from sys import stdin\n')] |
import partitura as pt
def addnote(midipitch, part, voice, start, end, idx):
    """
    Add a single note, given by its MIDI pitch, to *part*.

    Args:
        midipitch: MIDI pitch number of the note.
        part: partitura Part the note is added to.
        voice: voice number the note is assigned to.
        start, end: onset and offset passed through to ``part.add``.
        idx: suffix used to build the note id ('n{idx}').
    """
    # Split the MIDI number into pitch class (offset) and octave.
    offset = midipitch%12
    # NOTE(review): true division yields a float here; it is truncated with
    # int() again below — integer floor division would be more direct.
    octave = int(midipitch-offset)/12
    # Pitch-class spelling table: (step, alter), using sharps for black keys.
    name = [("C",0),
            ("C",1),
            ("D",0),
            ("D",1),
            ("E",0),
            ("F",0),
            ("F",1),
            ("G",0),
            ("G",1),
            ("A",0),
            ("A",1),
            ("B",0)]
    step, alter = name[int(offset)]
    part.add(pt.score.Note(id='n{}'.format(idx), step=step,
                           octave=int(octave), alter=alter, voice=voice),
             start=start, end=end)
def partFromProgression(prog, quarter_duration=4):
    """Build a partitura Part from a chord progression.

    Each chord occupies one span of ``quarter_duration`` divisions and its
    pitches are laid out as parallel voices.
    """
    result = pt.score.Part('P0', 'part from progression',
                           quarter_duration=quarter_duration)
    for chord_idx, chord in enumerate(prog.chords):
        onset = chord_idx * quarter_duration
        offset = (chord_idx + 1) * quarter_duration
        for voice_idx, pitch in enumerate(chord.pitches):
            addnote(pitch, result, voice_idx, onset, offset,
                    str(voice_idx) + str(chord_idx))
    return result
"partitura.score.Part"
] | [((775, 854), 'partitura.score.Part', 'pt.score.Part', (['"""P0"""', '"""part from progression"""'], {'quarter_duration': 'quarter_duration'}), "('P0', 'part from progression', quarter_duration=quarter_duration)\n", (788, 854), True, 'import partitura as pt\n')] |
import re
import textwrap
import discord
import asyncio
import random
import os
# Discord client instance and bot token (taken from the environment).
client = discord.Client()
token = os.environ['DISCORD_BOT_TOKEN']
@client.event
async def on_ready():
    """Fired once the bot is connected: set presence and announce startup."""
    # NOTE(review): the presence is set twice in a row — first via Game,
    # then overwritten with an equivalent Activity; the first call looks
    # redundant.
    await client.change_presence(activity=discord.Game(name=',help | v1.0β'))
    # or, for watching:
    activity = discord.Activity(name=',help | v1.0β', type=discord.ActivityType.playing)
    await client.change_presence(activity=activity)
    # Announce startup in a fixed channel (hard-coded channel id).
    channel = client.get_channel(680727914614095903)
    await channel.send('>>> **Normal Bot**が起動しました。')
@client.event
async def on_message(message):
    """Dispatch the bot's comma-prefixed text commands."""
    # Ignore messages sent by bots (including this bot itself).
    if message.author.bot:
        return
    # Help command
    if message.content == ',help':
        # Build the help message (heredoc keeps it readable).
        help_msg = textwrap.dedent('''\
        **__Normal Bot__** **Help Menu**
        > `,create name` チャンネルを作ることができます
        > `,updata` 最新のアップデータ情報を確認することができます
        > `,alldelete`実行されたチャンネルのメッセージをすべて削除します。**管理者権限必須** 
        > `,help` このHelpMenuです。
        ''')
        await message.channel.send(help_msg)
    # ,create command: create a text channel
    CREATE_COMMAND = ",create "
    if message.content.startswith(CREATE_COMMAND):
        # Extract "abcdef GHI" from a message like ",create abcdef GHI".
        ch_name = re.sub(CREATE_COMMAND, "", message.content)
        # Discord channel names cannot contain spaces, replace with hyphens.
        ch_name = re.sub("\\s+", "-", ch_name)
        # Quit if nothing is left after extraction.
        if len(ch_name) < 1:
            return
        # Create the channel under a hard-coded category.
        category = client.get_channel(605885674628841476)
        ch = await category.create_text_channel(name=ch_name)
        await message.channel.send(f"{ch.mention} を作成しました。")
    # ,create without arguments: show usage
    if message.content == ',create':
        await message.channel.send('`,create name` と入力することでチャンネルを作ることができます')
    # ,updata command: show latest update notes
    if message.content == ',updata':
        await message.channel.send('**v1.2 Updata** \n以下のコマンドを追加しました\n>>> `,alldel`そのチャンネルのすべてのメッセージを削除します。**管理者権限必須**')
    # ,alldel command: purge every message in the channel (admin only)
    if message.content == ',alldel':
        if message.author.guild_permissions.administrator:
            await message.channel.purge()
            await message.channel.send('> このチャンネルのすべてのメッセージを削除しました。')
        else:
            await message.channel.send('> あなたはこのコマンドを実行する権限がありません!')
    # staff recruitment command
    # news
    if message.content == ',news':
        # Build the announcement message (admin only).
        bosyu_msg = textwrap.dedent('''\
        **__Wakame NetWork News__** 
        >>>公式Wikiサイトを作りました!
        こちらからアクセスできます!
        ``http://bid.do/wikinet``
        <@&605725636241129482>
        ''')
        if message.author.guild_permissions.administrator:
            await message.channel.send(bosyu_msg)
        else:
            await message.channel.send('> あなたはこのコマンドを実行する権限がありません!')
    # ,kudel command
    # NOTE(review): this only *announces* a deletion, nothing is deleted.
    if message.content == ',kudel':
        if message.author.guild_permissions.administrator:
            await message.channel.send('> <@514349162519592963> のメッセージを削除しました。>')
        else:
            await message.channel.send('> あなたはこのコマンドを実行する権限がありません!')
# Start the bot (blocking call).
client.run(token)
| [
"textwrap.dedent",
"discord.Game",
"discord.Activity",
"discord.Client",
"re.sub"
] | [((90, 106), 'discord.Client', 'discord.Client', ([], {}), '()\n', (104, 106), False, 'import discord\n'), ((302, 375), 'discord.Activity', 'discord.Activity', ([], {'name': '""",help | v1.0β"""', 'type': 'discord.ActivityType.playing'}), "(name=',help | v1.0β', type=discord.ActivityType.playing)\n", (318, 375), False, 'import discord\n'), ((775, 1082), 'textwrap.dedent', 'textwrap.dedent', (['""" **__Normal Bot__** **Help Menu**\n > `,create name` チャンネルを作ることができます\n > `,updata` 最新のアップデータ情報を確認することができます\n > `,alldelete`実行されたチャンネルのメッセージをすべて削除します。**管理者権限必須** \n > `,help` このHelpMenuです。\n \n """'], {}), '(\n """ **__Normal Bot__** **Help Menu**\n > `,create name` チャンネルを作ることができます\n > `,updata` 最新のアップデータ情報を確認することができます\n > `,alldelete`実行されたチャンネルのメッセージをすべて削除します。**管理者権限必須** \n > `,help` このHelpMenuです。\n \n """\n )\n', (790, 1082), False, 'import textwrap\n'), ((1302, 1345), 're.sub', 're.sub', (['CREATE_COMMAND', '""""""', 'message.content'], {}), "(CREATE_COMMAND, '', message.content)\n", (1308, 1345), False, 'import re\n'), ((1414, 1442), 're.sub', 're.sub', (['"""\\\\s+"""', '"""-"""', 'ch_name'], {}), "('\\\\s+', '-', ch_name)\n", (1420, 1442), False, 'import re\n'), ((2469, 2711), 'textwrap.dedent', 'textwrap.dedent', (['""" **__Wakame NetWork News__** \n \n >>>公式Wikiサイトを作りました!\n こちらからアクセスできます!\n\n ``http://bid.do/wikinet``\n\n <@&605725636241129482>\n \n """'], {}), '(\n """ **__Wakame NetWork News__** \n \n >>>公式Wikiサイトを作りました!\n こちらからアクセスできます!\n\n ``http://bid.do/wikinet``\n\n <@&605725636241129482>\n \n """\n )\n', (2484, 2711), False, 'import textwrap\n'), ((226, 260), 'discord.Game', 'discord.Game', ([], {'name': '""",help | v1.0β"""'}), "(name=',help | v1.0β')\n", (238, 260), False, 'import discord\n')] |
"""
Created on 21 Aug 2013
@author: Anna
"""
import math
import numpy
import random
from .Globals import G
class Allocation:
    """Allocate demand items (MAs) to weekly bottleneck capacity.

    Depending on ``altRoutes`` the items are allocated either on their own
    route (``allocationStd``) or on an alternative route belonging to the
    same PPOS/SP (``alternativeRoutes``).  Items that could not be fully
    allocated keep a positive ``qty`` and are pushed to ``excBuffer``.
    Capacity and route data are read from the shared ``G`` globals module.
    """

    def __init__(self, itemList, week, altRoutes, excBuffer):
        # week index into G.CurrentCapacityDict[bottleneck]
        self.week = week
        # 1 -> allocate on alternative routes, otherwise standard allocation
        self.altRoutes = altRoutes
        self.itemList = itemList
        # temporary buffer collecting items with remaining (excess) quantity
        self.excBuffer = excBuffer

    def Run(self):
        """Allocate every item and buffer whatever could not be placed."""
        for CurrentMA in self.itemList:
            # call the allocation method based on the step (allocation on
            # same route vs. allocation on alternative routes)
            if self.altRoutes == 1:
                self.alternativeRoutes(CurrentMA)
            else:
                self.allocationStd(CurrentMA)
            # put leftover units in the output buffer for later steps
            if CurrentMA.qty > 0:
                self.excBuffer.append(CurrentMA)

    def allocationStd(self, MA):
        """Allocate *MA* on its own route for ``self.week``.

        When capacity is insufficient, the maximum allocable quantity is
        derived from the worst bottleneck and allocated instead, provided it
        reaches the minimum packing size.
        """
        sufficient = True  # stays True while no bottleneck goes negative
        # capacity the MA requires on every bottleneck of its route
        requiredCapacity = {}
        for x in G.RouteDict[MA.MAid]["route"]:
            requiredCapacity[x] = G.RouteDict[MA.MAid]["route"][x] * MA.qty
        # remaining capacity for the given week after subtracting the demand
        remainingCapacity = {}
        for bottleneck in G.CurrentCapacityDict:
            remainingCapacity[bottleneck] = (
                G.CurrentCapacityDict[bottleneck][self.week]
                - requiredCapacity[bottleneck]
            )
            # dropping below zero means the capacity is not sufficient
            if remainingCapacity[bottleneck] < 0:
                sufficient = False
        if sufficient:
            # full quantity fits: commit the updated remaining capacity
            allocableQty = MA.qty
            if MA.qty >= G.minPackingSize:
                for bottleneck in G.CurrentCapacityDict:
                    G.CurrentCapacityDict[bottleneck][self.week] = remainingCapacity[
                        bottleneck
                    ]
        else:
            # derive the max allocable qty from the worst-violated bottleneck
            excess = 0
            for bottleneck in remainingCapacity:
                if (
                    requiredCapacity[bottleneck] > 0
                    and remainingCapacity[bottleneck] < 0
                ):
                    excessUnits = (
                        remainingCapacity[bottleneck]
                        / G.RouteDict[MA.MAid]["route"][bottleneck]
                    )
                    if math.ceil(math.fabs(excessUnits)) > excess:
                        excess = math.ceil(math.fabs(excessUnits))
            assert excess <= MA.qty or MA.qty < G.minPackingSize
            allocableQty = MA.qty - excess
            if allocableQty >= G.minPackingSize:
                for bottleneck in G.CurrentCapacityDict:
                    G.CurrentCapacityDict[bottleneck][self.week] -= (
                        allocableQty * G.RouteDict[MA.MAid]["route"][bottleneck]
                    )
        # update attributes/variables affected by allocation
        if allocableQty >= G.minPackingSize:
            MA.qty -= allocableQty
            MA.minQty = max([0, MA.minQty - allocableQty])
            # record the allocation; FutureDemand and PPOSdemand are tracked
            # in separate output structures
            if MA.future == 1:
                G.AllocationFuture[G.replication].append(
                    [MA.orderID, MA.MAid, allocableQty, self.week + 1]
                )
                G.FutureLateness[G.replication] += (
                    max([0, self.week - MA.originalWeek]) * allocableQty
                )
                G.FutureEarliness[G.replication] += (
                    max([0, MA.originalWeek - self.week]) * allocableQty
                )
            else:
                G.AllocationPPOS[G.replication].append(
                    [MA.orderID, MA.MAid, allocableQty, self.week + 1]
                )
                G.PPOSLateness[G.replication] += (
                    max([0, self.week - MA.originalWeek]) * allocableQty
                )
                G.PPOSEarliness[G.replication] += (
                    max([0, MA.originalWeek - self.week]) * allocableQty
                )

    def alternativeRoutes(self, MA):
        """Allocate *MA* on an alternative route of the same PPOS/SP.

        The alternative MA offering the largest allocable quantity is
        chosen; ties are broken uniformly at random.
        """
        sufficient = False  # flag that shows if we have usable capacity
        # identify MAs with the same SP as the MA investigated
        alternativeMADict = (
            {}
        )  # FIXME: the PPOS attribute can be used instead for the current MA
        for alernativeMA in G.RouteDict:
            # if it is the same MA do not consider it
            if alernativeMA == MA.MAid:
                continue
            # if the alternative MA is of the same SP add it to the list
            PPOS = G.RouteDict[alernativeMA]["PPOS"]
            SP = G.RouteDict[alernativeMA]["SP"]
            if PPOS == MA.PPOSid and SP == MA.SPid:
                alternativeMADict[alernativeMA] = G.RouteDict[alernativeMA]
        # calculate max number of units for each alternative MA
        maxUnits = {}
        for alternativeMA in alternativeMADict:
            MAunits = []
            for routeElement in alternativeMADict[alternativeMA]["route"]:
                units = alternativeMADict[alternativeMA]["route"][routeElement]
                if units != 0:
                    MAunits.append(
                        G.CurrentCapacityDict[routeElement][self.week] / units
                    )
                sufficient = True
            maxUnits[alternativeMA] = math.floor(min(MAunits))
        # choose the MA with the max number of units
        if maxUnits and sufficient:
            maxU = 0
            maxID = []
            for MAid in maxUnits:
                if maxUnits[MAid] > maxU:
                    maxU = maxUnits[MAid]
                    maxID = [MAid]
                # BUGFIX: this used to be a second plain `if`, so the MA that
                # had just set the new maximum was appended a second time and
                # random.choice() below was biased towards it.
                elif maxUnits[MAid] == maxU:
                    maxID.append(MAid)
            # choose randomly among the MAs tied for the max number of units
            chosenMAId = random.choice(maxID)
            allocableQty = min([maxU, MA.qty])
            if allocableQty >= G.minPackingSize:
                for bottleneck in G.CurrentCapacityDict:
                    G.CurrentCapacityDict[bottleneck][self.week] -= (
                        allocableQty * G.RouteDict[chosenMAId]["route"][bottleneck]
                    )
                # update attributes/variables affected by allocation
                MA.qty -= allocableQty
                MA.minQty = max([0, MA.minQty - allocableQty])
                # record the allocation, distinguishing FutureDemand from
                # PPOSdemand
                if MA.future == 1:
                    G.AllocationFuture[G.replication].append(
                        [MA.orderID, chosenMAId, allocableQty, self.week + 1]
                    )
                    G.FutureLateness[G.replication] += (
                        max([0, self.week - MA.originalWeek]) * allocableQty
                    )
                    G.FutureEarliness[G.replication] += (
                        max([0, MA.originalWeek - self.week]) * allocableQty
                    )
                else:
                    G.AllocationPPOS[G.replication].append(
                        [MA.orderID, chosenMAId, allocableQty, self.week + 1]
                    )
                    G.PPOSLateness[G.replication] += (
                        max([0, self.week - MA.originalWeek]) * allocableQty
                    )
                    G.PPOSEarliness[G.replication] += (
                        max([0, MA.originalWeek - self.week]) * allocableQty
                    )
| [
"random.choice",
"math.fabs"
] | [((6488, 6508), 'random.choice', 'random.choice', (['maxID'], {}), '(maxID)\n', (6501, 6508), False, 'import random\n'), ((2776, 2798), 'math.fabs', 'math.fabs', (['excessUnits'], {}), '(excessUnits)\n', (2785, 2798), False, 'import math\n'), ((2853, 2875), 'math.fabs', 'math.fabs', (['excessUnits'], {}), '(excessUnits)\n', (2862, 2875), False, 'import math\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "Copyright (C) <NAME>"
import csv
import logging
import os
import signal
import socket
import sys
import time
import paho.mqtt.client as mqtt
import protobix
if sys.version_info >= (3, 0):
import configparser
else:
import ConfigParser as configparser
class ZabbixMQTT:
    """Forward MQTT messages to Zabbix.

    Reads its settings from an ini-style config file, subscribes to
    ``MQTT_TOPIC`` and sends every message whose topic is listed in the
    CSV key map (``KEYFILE``) to Zabbix via protobix.
    """

    def __init__(self, config_path="/etc/mqtt-zabbix/mqtt-zabbix.cfg"):
        # Read the config file
        config = configparser.RawConfigParser()
        config.read(config_path)
        # Use ConfigParser to pick out the settings
        self.DEBUG = config.getboolean("global", "DEBUG")
        self.LOGFILE = config.get("global", "LOGFILE")
        self.MQTT_HOST = config.get("global", "MQTT_HOST")
        self.MQTT_PORT = config.getint("global", "MQTT_PORT")
        self.MQTT_TOPIC = config.get("global", "MQTT_TOPIC")
        self.KEYHOST = config.get("global", "KEYHOST")
        self.KEYFILE = config.get("global", "KEYFILE")
        self.APPNAME = "mqtt-zabbix"
        # retained "presence" topic announcing whether this client is online
        self.PRESENCETOPIC = "clients/" + socket.getfqdn() + "/" + self.APPNAME + "/state"
        client_id = self.APPNAME + "_%d" % os.getpid()
        self.mqttc = mqtt.Client(client_id=client_id)
        LOGFORMAT = "%(asctime)-15s %(message)s"
        if self.DEBUG:
            logging.basicConfig(filename=self.LOGFILE,
                                level=logging.DEBUG,
                                format=LOGFORMAT)
        else:
            logging.basicConfig(filename=self.LOGFILE,
                                level=logging.INFO,
                                format=LOGFORMAT)
        self.zbx_sender = protobix.DataContainer()

    def start(self):
        """Install signal handlers, connect and run the MQTT network loop."""
        # Use the signal module to handle signals
        signal.signal(signal.SIGTERM, self.cleanup)
        signal.signal(signal.SIGINT, self.cleanup)
        # Connect to the broker
        self.connect()
        # Try to loop_forever until interrupted
        try:
            self.mqttc.loop_forever()
        except KeyboardInterrupt:
            logging.info("Interrupted by keypress")
            sys.exit(0)

    def on_publish(self, mosq, obj, mid):
        """
        What to do when a message is published
        """
        logging.debug("MID " + str(mid) + " published.")

    def on_subscribe(self, mosq, obj, mid, qos_list):
        """
        What to do in the event of subscribing to a topic
        """
        logging.debug("Subscribe with mid " + str(mid) + " received.")

    def on_unsubscribe(self, mosq, obj, mid):
        """
        What to do in the event of unsubscribing from a topic
        """
        logging.debug("Unsubscribe with mid " + str(mid) + " received.")

    # NOTE(review): the extra positional parameter here (self2) makes this a
    # 4-argument callback when bound — confirm it matches the paho-mqtt
    # version in use.
    def on_connect(self, self2, mosq, obj, result_code):
        """
        Handle connections (or failures) to the broker.
        This is called after the client has received a CONNACK message
        from the broker in response to calling connect().
        The parameter rc is an integer giving the return code:
        0: Success
        1: Refused - unacceptable protocol version
        2: Refused - identifier rejected
        3: Refused - server unavailable
        4: Refused - bad user name or password (MQTT v3.1 broker only)
        5: Refused - not authorised (MQTT v3.1 broker only)
        """
        logging.debug("on_connect RC: " + str(result_code))
        if result_code == 0:
            logging.info("Connected to %s:%s", self.MQTT_HOST, self.MQTT_PORT)
            # Publish retained LWT as per
            # http://stackoverflow.com/q/97694
            # See also the will_set function in connect() below
            self.mqttc.publish(self.PRESENCETOPIC, "1", retain=True)
            self.process_connection()
        elif result_code == 1:
            logging.info("Connection refused - unacceptable protocol version")
            self.cleanup()
        elif result_code == 2:
            logging.info("Connection refused - identifier rejected")
            self.cleanup()
        elif result_code == 3:
            logging.info("Connection refused - server unavailable")
            logging.info("Retrying in 30 seconds")
            time.sleep(30)
        elif result_code == 4:
            logging.info("Connection refused - bad user name or password")
            self.cleanup()
        elif result_code == 5:
            logging.info("Connection refused - not authorised")
            self.cleanup()
        else:
            logging.warning("Something went wrong. RC:" + str(result_code))
            self.cleanup()

    def on_disconnect(self, mosq, obj, result_code):
        """
        Handle disconnections from the broker
        """
        if result_code == 0:
            logging.info("Clean disconnection")
        else:
            logging.info("Unexpected disconnection! Reconnecting in 5 seconds")
            logging.debug("Result code: %s", result_code)
            time.sleep(5)

    def on_message(self, mosq, obj, msg):
        """
        What to do when the client recieves a message from the broker
        """
        logging.debug("Received: " + str(msg.payload) +
                      " received on topic " + str(msg.topic) +
                      " with QoS " + str(msg.qos))
        self.process_message(msg)

    def on_log(self, mosq, obj, level, string):
        """
        What to do with debug log output from the MQTT library
        """
        logging.debug(string)

    def process_connection(self):
        """
        What to do when a new connection is established
        """
        logging.debug("Processing connection")
        self.mqttc.subscribe(self.MQTT_TOPIC, 2)

    def process_message(self, msg):
        """
        What to do with the message that's arrived.
        Looks up the topic in the KeyMap dictionary, and forwards
        the message onto Zabbix using the associated Zabbix key
        """
        logging.debug("Processing : " + msg.topic)
        topics = self.get_topics()
        if msg.topic in topics:
            # NOTE(review): with paho-mqtt on Python 3 msg.payload is bytes,
            # so these comparisons against the str constants "ON"/"OFF" may
            # never match — confirm against the paho version in use.
            if msg.payload == "ON":
                msg.payload = 1
            if msg.payload == "OFF":
                msg.payload = 0
            zbx_key = topics[msg.topic]
            logging.info("Sending %s %s to Zabbix for key %s", msg.topic, msg.payload, zbx_key)
            self.zbx_sender.data_type = 'items'
            self.zbx_sender.add_item(self.KEYHOST, zbx_key, str(msg.payload))
            self.zbx_sender.send()
        else:
            # Received something with a /raw/ topic,
            # but it didn't match anything. Log it, and discard it
            logging.debug("Unknown: %s", msg.topic)

    def cleanup(self, signum=15, frame=None):
        """
        Signal handler to ensure we disconnect cleanly
        in the event of a SIGTERM or SIGINT.
        """
        logging.info("Disconnecting from broker")
        # Publish a retained message to state that this client is offline
        self.mqttc.publish(self.PRESENCETOPIC, "0", retain=True)
        self.mqttc.disconnect()
        logging.info("Exiting on signal %d", signum)
        sys.exit(signum)

    def connect(self):
        """
        Connect to the broker, define the callbacks, and subscribe
        This will also set the Last Will and Testament (LWT)
        The LWT will be published in the event of an unclean or
        unexpected disconnection.
        """
        logging.debug("Connecting to %s:%s", self.MQTT_HOST, self.MQTT_PORT)
        # Set the Last Will and Testament (LWT) *before* connecting
        self.mqttc.will_set(self.PRESENCETOPIC, "0", qos=0, retain=True)
        result = self.mqttc.connect(self.MQTT_HOST, self.MQTT_PORT, 60)
        if result != 0:
            logging.info("Connection failed with error code %s. Retrying", result)
            time.sleep(10)
            self.connect()
        # Define the callbacks
        self.mqttc.on_connect = self.on_connect
        self.mqttc.on_disconnect = self.on_disconnect
        self.mqttc.on_publish = self.on_publish
        self.mqttc.on_subscribe = self.on_subscribe
        self.mqttc.on_unsubscribe = self.on_unsubscribe
        self.mqttc.on_message = self.on_message
        if self.DEBUG:
            self.mqttc.on_log = self.on_log

    def get_topics(self):
        """
        Read the topics and keys into a dictionary for internal lookups
        """
        logging.debug("Loading map")
        with open(self.KEYFILE, mode="r") as inputfile:
            reader = csv.reader(inputfile)
            # first CSV column: MQTT topic, second column: Zabbix item key
            return dict((rows[0], rows[1]) for rows in reader)
if __name__ == "__main__":
    # Entry point: read the config, connect and loop forever.
    zbx_mqtt = ZabbixMQTT()
    zbx_mqtt.start()
| [
"logging.basicConfig",
"signal.signal",
"logging.debug",
"ConfigParser.RawConfigParser",
"socket.getfqdn",
"paho.mqtt.client.Client",
"time.sleep",
"protobix.DataContainer",
"csv.reader",
"sys.exit",
"os.getpid",
"logging.info"
] | [((487, 517), 'ConfigParser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (515, 517), True, 'import ConfigParser as configparser\n'), ((1214, 1246), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {'client_id': 'client_id'}), '(client_id=client_id)\n', (1225, 1246), True, 'import paho.mqtt.client as mqtt\n'), ((1677, 1701), 'protobix.DataContainer', 'protobix.DataContainer', ([], {}), '()\n', (1699, 1701), False, 'import protobix\n'), ((1782, 1825), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'self.cleanup'], {}), '(signal.SIGTERM, self.cleanup)\n', (1795, 1825), False, 'import signal\n'), ((1834, 1876), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self.cleanup'], {}), '(signal.SIGINT, self.cleanup)\n', (1847, 1876), False, 'import signal\n'), ((5457, 5478), 'logging.debug', 'logging.debug', (['string'], {}), '(string)\n', (5470, 5478), False, 'import logging\n'), ((5602, 5640), 'logging.debug', 'logging.debug', (['"""Processing connection"""'], {}), "('Processing connection')\n", (5615, 5640), False, 'import logging\n'), ((5941, 5983), 'logging.debug', 'logging.debug', (["('Processing : ' + msg.topic)"], {}), "('Processing : ' + msg.topic)\n", (5954, 5983), False, 'import logging\n'), ((6853, 6894), 'logging.info', 'logging.info', (['"""Disconnecting from broker"""'], {}), "('Disconnecting from broker')\n", (6865, 6894), False, 'import logging\n'), ((7074, 7118), 'logging.info', 'logging.info', (['"""Exiting on signal %d"""', 'signum'], {}), "('Exiting on signal %d', signum)\n", (7086, 7118), False, 'import logging\n'), ((7127, 7143), 'sys.exit', 'sys.exit', (['signum'], {}), '(signum)\n', (7135, 7143), False, 'import sys\n'), ((7426, 7494), 'logging.debug', 'logging.debug', (['"""Connecting to %s:%s"""', 'self.MQTT_HOST', 'self.MQTT_PORT'], {}), "('Connecting to %s:%s', self.MQTT_HOST, self.MQTT_PORT)\n", (7439, 7494), False, 'import logging\n'), ((8406, 8434), 'logging.debug', 'logging.debug', (['"""Loading map"""'], 
{}), "('Loading map')\n", (8419, 8434), False, 'import logging\n'), ((1333, 1419), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'self.LOGFILE', 'level': 'logging.DEBUG', 'format': 'LOGFORMAT'}), '(filename=self.LOGFILE, level=logging.DEBUG, format=\n LOGFORMAT)\n', (1352, 1419), False, 'import logging\n'), ((1505, 1590), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'self.LOGFILE', 'level': 'logging.INFO', 'format': 'LOGFORMAT'}), '(filename=self.LOGFILE, level=logging.INFO, format=LOGFORMAT\n )\n', (1524, 1590), False, 'import logging\n'), ((3453, 3519), 'logging.info', 'logging.info', (['"""Connected to %s:%s"""', 'self.MQTT_HOST', 'self.MQTT_PORT'], {}), "('Connected to %s:%s', self.MQTT_HOST, self.MQTT_PORT)\n", (3465, 3519), False, 'import logging\n'), ((4758, 4793), 'logging.info', 'logging.info', (['"""Clean disconnection"""'], {}), "('Clean disconnection')\n", (4770, 4793), False, 'import logging\n'), ((4820, 4887), 'logging.info', 'logging.info', (['"""Unexpected disconnection! Reconnecting in 5 seconds"""'], {}), "('Unexpected disconnection! Reconnecting in 5 seconds')\n", (4832, 4887), False, 'import logging\n'), ((4900, 4945), 'logging.debug', 'logging.debug', (['"""Result code: %s"""', 'result_code'], {}), "('Result code: %s', result_code)\n", (4913, 4945), False, 'import logging\n'), ((4958, 4971), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4968, 4971), False, 'import time\n'), ((6242, 6329), 'logging.info', 'logging.info', (['"""Sending %s %s to Zabbix for key %s"""', 'msg.topic', 'msg.payload', 'zbx_key'], {}), "('Sending %s %s to Zabbix for key %s', msg.topic, msg.payload,\n zbx_key)\n", (6254, 6329), False, 'import logging\n'), ((6634, 6673), 'logging.debug', 'logging.debug', (['"""Unknown: %s"""', 'msg.topic'], {}), "('Unknown: %s', msg.topic)\n", (6647, 6673), False, 'import logging\n'), ((7744, 7814), 'logging.info', 'logging.info', (['"""Connection failed with error code %s. 
Retrying"""', 'result'], {}), "('Connection failed with error code %s. Retrying', result)\n", (7756, 7814), False, 'import logging\n'), ((7827, 7841), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (7837, 7841), False, 'import time\n'), ((8512, 8533), 'csv.reader', 'csv.reader', (['inputfile'], {}), '(inputfile)\n', (8522, 8533), False, 'import csv\n'), ((1181, 1192), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1190, 1192), False, 'import os\n'), ((2079, 2118), 'logging.info', 'logging.info', (['"""Interrupted by keypress"""'], {}), "('Interrupted by keypress')\n", (2091, 2118), False, 'import logging\n'), ((2131, 2142), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2139, 2142), False, 'import sys\n'), ((3823, 3889), 'logging.info', 'logging.info', (['"""Connection refused - unacceptable protocol version"""'], {}), "('Connection refused - unacceptable protocol version')\n", (3835, 3889), False, 'import logging\n'), ((3960, 4016), 'logging.info', 'logging.info', (['"""Connection refused - identifier rejected"""'], {}), "('Connection refused - identifier rejected')\n", (3972, 4016), False, 'import logging\n'), ((1089, 1105), 'socket.getfqdn', 'socket.getfqdn', ([], {}), '()\n', (1103, 1105), False, 'import socket\n'), ((4087, 4142), 'logging.info', 'logging.info', (['"""Connection refused - server unavailable"""'], {}), "('Connection refused - server unavailable')\n", (4099, 4142), False, 'import logging\n'), ((4155, 4193), 'logging.info', 'logging.info', (['"""Retrying in 30 seconds"""'], {}), "('Retrying in 30 seconds')\n", (4167, 4193), False, 'import logging\n'), ((4206, 4220), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (4216, 4220), False, 'import time\n'), ((4264, 4326), 'logging.info', 'logging.info', (['"""Connection refused - bad user name or password"""'], {}), "('Connection refused - bad user name or password')\n", (4276, 4326), False, 'import logging\n'), ((4397, 4448), 'logging.info', 'logging.info', (['"""Connection refused - not 
authorised"""'], {}), "('Connection refused - not authorised')\n", (4409, 4448), False, 'import logging\n')] |
from adminsortable2.admin import SortableInlineAdminMixin
from django.contrib import admin
from .forms import GlobalOptionsInlineForm, ChartOptionsInlineForm, DatasetOptionsInlineForm, AxisOptionsInlineForm, \
ColorInputForm
from .models import GlobalOptionsGroupModel, GlobalOptionsModel, \
ChartOptionsGroupModel, ChartOptionsModel, \
DatasetOptionsGroupModel, DatasetOptionsModel, \
AxisOptionsGroupModel, AxisOptionsModel, ChartSpecificOptionsModel, DatasetSpecificOptionsModel
from .models_colors import ColorModel, ColorGroupModel
# ------------------------
# Inline Forms for Options
# ------------------------
class OptionsInlineBase(admin.TabularInline):
    # Shared tabular-inline configuration for every option row: each option
    # is edited as a (label, type, value) triple and no blank extra rows
    # are pre-rendered (extra = 0).
    fields = ['label', 'type', 'value']
    list_display = ('label', 'type', 'value')
    extra = 0
# Options Groups Inlines
# ------------------------
class GlobalOptionsInlineAdmin(OptionsInlineBase):
    # Inline rows for GlobalOptionsModel entries.
    model = GlobalOptionsModel
    form = GlobalOptionsInlineForm
class ChartOptionsInlineAdmin(OptionsInlineBase):
    # Inline rows for ChartOptionsModel entries.
    model = ChartOptionsModel
    form = ChartOptionsInlineForm
class DatasetOptionsInlineAdmin(OptionsInlineBase):
    # Inline rows for DatasetOptionsModel entries.
    model = DatasetOptionsModel
    form = DatasetOptionsInlineForm
class AxisOptionsInlineAdmin(OptionsInlineBase):
    # Inline rows for AxisOptionsModel entries.
    model = AxisOptionsModel
    form = AxisOptionsInlineForm
# Specific Options inlines
# ------------------------
class ChartSpecificOptionsInlineAdmin(OptionsInlineBase):
    # Options bound to one specific chart; reuses the generic chart form.
    model = ChartSpecificOptionsModel
    form = ChartOptionsInlineForm
class DatasetSpecificOptionsInlineAdmin(OptionsInlineBase):
    # Options bound to one specific dataset; reuses the generic dataset form.
    model = DatasetSpecificOptionsModel
    form = DatasetOptionsInlineForm
# Register Options Groups
# ------------------------
@admin.register(GlobalOptionsGroupModel)
class GlobalOptionsAdmin(admin.ModelAdmin):
    # Admin page for a global options group; its individual option rows are
    # edited inline below the group form.
    fields = ['name', 'enabled', 'site', 'colors']
    list_display = ('name', 'enabled')
    inlines = [
        GlobalOptionsInlineAdmin,
    ]
@admin.register(ChartOptionsGroupModel)
class ChartsOptionsAdmin(admin.ModelAdmin):
    # Admin page for a chart options group (name only; options edited inline).
    fields = ['name', ]
    list_display = ('name',)
    inlines = [
        ChartOptionsInlineAdmin,
    ]
@admin.register(DatasetOptionsGroupModel)
class DatasetOptionsAdmin(admin.ModelAdmin):
    # Admin page for a dataset options group (name only; options edited inline).
    fields = ['name', ]
    list_display = ('name',)
    inlines = [
        DatasetOptionsInlineAdmin,
    ]
@admin.register(AxisOptionsGroupModel)
class AxisOptionsAdmin(admin.ModelAdmin):
    # Admin page for an axis options group; slug is generated elsewhere and
    # therefore shown read-only here.
    fields = ['name', 'slug', 'type', 'display', 'weight']
    readonly_fields = ['slug']
    list_display = ('name', 'type')
    inlines = [
        AxisOptionsInlineAdmin,
    ]
# Register Colour Admins
# ------------------------
class ColorsInline(admin.TabularInline):
    # Inline rows for the individual colors that belong to a color group.
    fields = ['types', 'labels', 'colors']
    list_display = ['types', 'labels', 'colors']
    extra = 0
    model = ColorModel
    form = ColorInputForm
@admin.register(ColorGroupModel)
class ColorGroupAdmin(admin.ModelAdmin):
    # Admin page for a named color group; colors themselves are edited inline.
    fields = ['name']
    list_display = ('name',)
    inlines = [
        ColorsInline,
    ]
| [
"django.contrib.admin.register"
] | [((1682, 1721), 'django.contrib.admin.register', 'admin.register', (['GlobalOptionsGroupModel'], {}), '(GlobalOptionsGroupModel)\n', (1696, 1721), False, 'from django.contrib import admin\n'), ((1914, 1952), 'django.contrib.admin.register', 'admin.register', (['ChartOptionsGroupModel'], {}), '(ChartOptionsGroupModel)\n', (1928, 1952), False, 'from django.contrib import admin\n'), ((2107, 2147), 'django.contrib.admin.register', 'admin.register', (['DatasetOptionsGroupModel'], {}), '(DatasetOptionsGroupModel)\n', (2121, 2147), False, 'from django.contrib import admin\n'), ((2305, 2342), 'django.contrib.admin.register', 'admin.register', (['AxisOptionsGroupModel'], {}), '(AxisOptionsGroupModel)\n', (2319, 2342), False, 'from django.contrib import admin\n'), ((2817, 2848), 'django.contrib.admin.register', 'admin.register', (['ColorGroupModel'], {}), '(ColorGroupModel)\n', (2831, 2848), False, 'from django.contrib import admin\n')] |
# Generated by Django 3.1.7 on 2021-03-02 15:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration -- do not hand-edit the operations.
    initial = True
    dependencies = [
        ('shop', '0001_initial'),
        # Resolves to whatever AUTH_USER_MODEL points at (custom or default user).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('category', '0002_auto_20210302_2055'),
    ]
    operations = [
        # Record which user created a category; nullable so existing rows
        # remain valid without an owner.
        migrations.AddField(
            model_name='category',
            name='added_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # Each category now belongs to a shop branch; deleting the branch
        # cascades to its categories.
        migrations.AddField(
            model_name='category',
            name='branch',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.shopbranch'),
        ),
        # A category may be linked at most once to any given menu or item.
        migrations.AlterUniqueTogether(
            name='menucategory',
            unique_together={('category', 'menu')},
        ),
        migrations.AlterUniqueTogether(
            name='itemcategory',
            unique_together={('category', 'item')},
        ),
    ]
| [
"django.db.migrations.AlterUniqueTogether",
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey"
] | [((281, 338), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (312, 338), False, 'from django.db import migrations, models\n'), ((867, 963), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""menucategory"""', 'unique_together': "{('category', 'menu')}"}), "(name='menucategory', unique_together={(\n 'category', 'menu')})\n", (897, 963), False, 'from django.db import migrations, models\n'), ((1003, 1099), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""itemcategory"""', 'unique_together': "{('category', 'item')}"}), "(name='itemcategory', unique_together={(\n 'category', 'item')})\n", (1033, 1099), False, 'from django.db import migrations, models\n'), ((526, 645), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to=settings.AUTH_USER_MODEL)\n', (543, 645), False, 'from django.db import migrations, models\n'), ((762, 851), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""shop.shopbranch"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'shop.shopbranch')\n", (779, 851), False, 'from django.db import migrations, models\n')] |
# Generated by Django 2.2.7 on 2020-04-22 13:16
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration -- do not hand-edit the operations.
    # Introduces the ReferralAssignment join model and an M2M `assignees`
    # field on Referral routed through it.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("core", "0012_remove_subject_field_from_referral"),
    ]
    operations = [
        # Make the referral's creator nullable and keep referrals alive when
        # the user account is deleted (SET_NULL instead of a cascade).
        migrations.AlterField(
            model_name="referral",
            name="user",
            field=models.ForeignKey(
                blank=True,
                help_text="User who created the referral",
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="referrals_created",
                to=settings.AUTH_USER_MODEL,
                verbose_name="user",
            ),
        ),
        # Join table linking a referral to the users working on it, with
        # bookkeeping about who created the assignment and under which unit.
        migrations.CreateModel(
            name="ReferralAssignment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        editable=False,
                        help_text="Primary key for the assignment",
                        primary_key=True,
                        serialize=False,
                        verbose_name="id",
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="created at"),
                ),
                (
                    # Deleting the assignee also deletes the assignment (CASCADE).
                    "assignee",
                    models.ForeignKey(
                        help_text="User is assigned to work on the referral",
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="assignee",
                    ),
                ),
                (
                    # Audit field only; "+" disables the reverse relation.
                    "created_by",
                    models.ForeignKey(
                        blank=True,
                        help_text="User who created the assignment",
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="created_by",
                    ),
                ),
                (
                    "referral",
                    models.ForeignKey(
                        help_text="Referral the assignee is linked with",
                        on_delete=django.db.models.deletion.CASCADE,
                        to="core.Referral",
                        verbose_name="referral",
                    ),
                ),
                (
                    "unit",
                    models.ForeignKey(
                        blank=True,
                        help_text="Unit under which the assignment was created",
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="+",
                        to="core.Unit",
                        verbose_name="unit",
                    ),
                ),
            ],
            options={
                "verbose_name": "referral assignment",
                "db_table": "partaj_referralassignment",
                # A user can be assigned to a given referral only once.
                "unique_together": {("assignee", "referral")},
            },
        ),
        migrations.AddField(
            model_name="referral",
            name="assignees",
            field=models.ManyToManyField(
                help_text="Partaj users that have been assigned to work on this referral",
                related_name="referrals_assigned",
                through="core.ReferralAssignment",
                to=settings.AUTH_USER_MODEL,
                verbose_name="assignees",
            ),
        ),
    ]
| [
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency"
] | [((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((482, 704), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""User who created the referral"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""referrals_created"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""user"""'}), "(blank=True, help_text='User who created the referral',\n null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\n 'referrals_created', to=settings.AUTH_USER_MODEL, verbose_name='user')\n", (499, 704), False, 'from django.db import migrations, models\n'), ((3506, 3741), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'help_text': '"""Partaj users that have been assigned to work on this referral"""', 'related_name': '"""referrals_assigned"""', 'through': '"""core.ReferralAssignment"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""assignees"""'}), "(help_text=\n 'Partaj users that have been assigned to work on this referral',\n related_name='referrals_assigned', through='core.ReferralAssignment',\n to=settings.AUTH_USER_MODEL, verbose_name='assignees')\n", (3528, 3741), False, 'from django.db import migrations, models\n'), ((991, 1125), 'django.db.models.AutoField', 'models.AutoField', ([], {'editable': '(False)', 'help_text': '"""Primary key for the assignment"""', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""id"""'}), "(editable=False, help_text='Primary key for the assignment',\n primary_key=True, serialize=False, verbose_name='id')\n", (1007, 1125), False, 'from django.db import migrations, models\n'), ((1357, 1423), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""created at"""'}), 
"(auto_now_add=True, verbose_name='created at')\n", (1377, 1423), False, 'from django.db import migrations, models\n'), ((1514, 1693), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""User is assigned to work on the referral"""', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""assignee"""'}), "(help_text='User is assigned to work on the referral',\n on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL, verbose_name='assignee')\n", (1531, 1693), False, 'from django.db import migrations, models\n'), ((1896, 2110), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""User who created the assignment"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""created_by"""'}), "(blank=True, help_text='User who created the assignment',\n null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\n '+', to=settings.AUTH_USER_MODEL, verbose_name='created_by')\n", (1913, 2110), False, 'from django.db import migrations, models\n'), ((2383, 2548), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Referral the assignee is linked with"""', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.Referral"""', 'verbose_name': '"""referral"""'}), "(help_text='Referral the assignee is linked with',\n on_delete=django.db.models.deletion.CASCADE, to='core.Referral',\n verbose_name='referral')\n", (2400, 2548), False, 'from django.db import migrations, models\n'), ((2746, 2958), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""Unit under which the assignment was created"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""core.Unit"""', 'verbose_name': '"""unit"""'}), "(blank=True, help_text=\n 'Unit 
under which the assignment was created', null=True, on_delete=\n django.db.models.deletion.SET_NULL, related_name='+', to='core.Unit',\n verbose_name='unit')\n", (2763, 2958), False, 'from django.db import migrations, models\n')] |
"""
This component provides support for Home Automation Manager (HAM).
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/edgeos/
"""
import logging
import sys
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .helpers import async_set_ha, clear_ha, get_ha, handle_log_level
from .helpers.const import *
REQUIREMENTS = ["aiohttp"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
    """YAML-based setup is a no-op for this integration; all real work
    happens per config entry in ``async_setup_entry``."""
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up a EdgeOS component.

    Returns True only when the integration was initialised successfully;
    any exception is logged (with the failing line number) instead of
    being propagated to Home Assistant.
    """
    initialized = False
    try:
        await handle_log_level(hass, entry)
        _LOGGER.debug(f"Starting async_setup_entry of {DOMAIN}")
        # Re-run option handling whenever the user edits the entry's options.
        entry.add_update_listener(async_options_updated)
        await async_set_ha(hass, entry)
        initialized = True
    except Exception as ex:
        # Pull the traceback so the error log pinpoints the failing line.
        exc_type, exc_obj, tb = sys.exc_info()
        line_number = tb.tb_lineno
        _LOGGER.error(f"Failed to load EdgeOS, error: {ex}, line: {line_number}")
    return initialized
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # Look up the manager instance created for this entry during setup.
    ha = get_ha(hass, entry.entry_id)
    if ha is not None:
        # Let the manager clean up after itself before its reference is dropped.
        await ha.async_remove(entry)
    # Drop the stored instance so a re-added entry starts from scratch.
    clear_ha(hass, entry.entry_id)
    return True
async def async_options_updated(hass: HomeAssistant, entry: ConfigEntry):
    """Triggered by config entry options updates."""
    # Re-apply logging configuration first, in case the log level changed.
    await handle_log_level(hass, entry)
    _LOGGER.info(f"async_options_updated, Entry: {entry.as_dict()} ")
    ha = get_ha(hass, entry.entry_id)
    if ha is not None:
        # Push the updated entry into the running manager instance.
        await ha.async_update_entry(entry)
| [
"logging.getLogger",
"sys.exc_info"
] | [((460, 487), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (477, 487), False, 'import logging\n'), ((991, 1005), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1003, 1005), False, 'import sys\n')] |
# Generated by Django 3.2 on 2021-06-14 21:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration -- do not hand-edit the operations.
    # Moves `discounts` from Purchase onto Entry and adds Entry.reserved.
    dependencies = [
        ('Cinema', '0010_merge_0009_auto_20210611_1256_0009_auto_20210611_1549'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='purchase',
            name='discounts',
        ),
        migrations.AddField(
            model_name='entry',
            name='discounts',
            field=models.ManyToManyField(db_index=True, to='Cinema.Discount'),
        ),
        # New entries are not reserved until explicitly marked.
        migrations.AddField(
            model_name='entry',
            name='reserved',
            field=models.BooleanField(default=False),
        ),
        # NOTE(review): the default lives under /movie_pics/ but uploads go to
        # profile_pics/ -- looks inconsistent, confirm the intended directory.
        migrations.AlterField(
            model_name='movie',
            name='poster',
            field=models.ImageField(blank=True, default='/movie_pics/default.jpg', null=True, upload_to='profile_pics/'),
        ),
        migrations.AlterField(
            model_name='purchase',
            name='entries',
            field=models.ManyToManyField(db_index=True, to='Cinema.Entry'),
        ),
    ]
| [
"django.db.models.ImageField",
"django.db.migrations.RemoveField",
"django.db.models.ManyToManyField",
"django.db.models.BooleanField"
] | [((267, 330), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""purchase"""', 'name': '"""discounts"""'}), "(model_name='purchase', name='discounts')\n", (289, 330), False, 'from django.db import migrations, models\n'), ((476, 535), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'db_index': '(True)', 'to': '"""Cinema.Discount"""'}), "(db_index=True, to='Cinema.Discount')\n", (498, 535), False, 'from django.db import migrations, models\n'), ((656, 690), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (675, 690), False, 'from django.db import migrations, models\n'), ((811, 917), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'default': '"""/movie_pics/default.jpg"""', 'null': '(True)', 'upload_to': '"""profile_pics/"""'}), "(blank=True, default='/movie_pics/default.jpg', null=True,\n upload_to='profile_pics/')\n", (828, 917), False, 'from django.db import migrations, models\n'), ((1038, 1094), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'db_index': '(True)', 'to': '"""Cinema.Entry"""'}), "(db_index=True, to='Cinema.Entry')\n", (1060, 1094), False, 'from django.db import migrations, models\n')] |
from django.contrib import admin
from .models import Post, Draft
from import_export.admin import ImportExportModelAdmin
# Register your models here.
# admin.site.register(Post)
# admin.site.register(Draft)
@admin.register(Draft)
@admin.register(Post)
class ViewAdmin(ImportExportModelAdmin):
    # One shared admin class registered for both Post and Draft; inheriting
    # from the import_export package's ImportExportModelAdmin is what adds
    # the bulk import/export behaviour -- no further customisation needed.
    pass
| [
"django.contrib.admin.register"
] | [((210, 231), 'django.contrib.admin.register', 'admin.register', (['Draft'], {}), '(Draft)\n', (224, 231), False, 'from django.contrib import admin\n'), ((233, 253), 'django.contrib.admin.register', 'admin.register', (['Post'], {}), '(Post)\n', (247, 253), False, 'from django.contrib import admin\n')] |
#!/usr/bin/env python
# -*- coding: utf-8
import numpy as np
from ibidem.advent_of_code.util import get_input_name
BASE_PATTERN = [0, 1, 0, -1]


def pattern_generator(i):
    """Yield the FFT multiplier pattern for output position *i*.

    Each base value is stretched to ``i + 1`` repeats, the sequence cycles
    forever, and the very first element is skipped (per the puzzle rules).
    Implemented with index arithmetic instead of nested loops: element k of
    the unskipped stream is ``BASE_PATTERN[(k // (i + 1)) % 4]``, so
    starting k at 1 drops the first element for free.
    """
    period = i + 1
    k = 1
    while True:
        yield BASE_PATTERN[(k // period) % len(BASE_PATTERN)]
        k += 1


def process_signal(i, signal):
    """Return output digit *i* of one FFT phase applied to *signal*."""
    total = 0
    for value, multiplier in zip(signal, pattern_generator(i)):
        total += value * multiplier
    return abs(total) % 10


def process_phase(signal):
    """Apply one full FFT phase: one output digit per input digit."""
    digits = []
    for i in range(len(signal)):
        digits.append(process_signal(i, signal))
    return digits
def process_phase_offset(signal):
    """One FFT phase for the back half of the signal.

    There, output[i] reduces to ``sum(signal[i:]) % 10``, so the whole
    phase is a single reversed cumulative sum -- O(n) instead of O(n^2).
    """
    suffix_sums = np.cumsum(signal[::-1])[::-1]
    return np.mod(np.abs(suffix_sums), 10)
def process(data, repetitions=1, offset=None):
    """Run 100 FFT phases over the digits of *data* and return the first
    eight digits of the result as a string.

    Args:
        data: string of decimal digits.
        repetitions: how many times the input signal is tiled.
        offset: index of the first digit to keep. When None, the offset is
            read from the first seven digits of the input and the fast
            suffix-sum algorithm (process_phase_offset) is used; the
            assert guards its precondition that the offset lies past the
            signal's midpoint.
    """
    print("Starting new process")
    signal = np.fromiter((int(c) for c in data), dtype=np.int8)
    signal = np.tile(signal, repetitions)
    print("Signal is {} digits long".format(len(signal)))
    if offset is None:
        offset = int(data[:7])
        # The cumulative-sum shortcut is only valid in the back half of the signal.
        assert offset > len(signal) / 2
        print("Dropping first {} digits".format(offset))
        pp = process_phase_offset
    else:
        pp = process_phase
    signal = np.array(signal[offset:])
    print("Signal is {} digits long after dropping offset".format(len(signal)))
    for phase in range(100):
        signal = pp(signal)
        if phase % 10 == 0:
            print("Completed phase {}".format(phase))
    return "".join(str(d) for d in signal)[:8]
def part1():
    """Solve part 1: process the raw signal once, keeping all of it (offset 0)."""
    with open(get_input_name(16, 2019)) as fobj:
        data = fobj.read().strip()
    result = process(data, offset=0)
    print("After 100 phases, the cleaned signal starts with these 8 digits: {}".format(result))
def part2():
    """Solve part 2: signal tiled 10000 times, read at the offset encoded
    in its first seven digits (process() extracts it when offset is None)."""
    with open(get_input_name(16, 2019)) as fobj:
        data = fobj.read().strip()
    result = process(data, repetitions=10000)
    print("After 100 phases, the cleaned signal starts with these 8 digits: {}".format(result))
if __name__ == "__main__":
    part1()
    part2()
| [
"numpy.tile",
"numpy.flip",
"numpy.abs",
"numpy.array",
"ibidem.advent_of_code.util.get_input_name"
] | [((873, 901), 'numpy.tile', 'np.tile', (['signal', 'repetitions'], {}), '(signal, repetitions)\n', (880, 901), True, 'import numpy as np\n'), ((1195, 1220), 'numpy.array', 'np.array', (['signal[offset:]'], {}), '(signal[offset:])\n', (1203, 1220), True, 'import numpy as np\n'), ((697, 707), 'numpy.abs', 'np.abs', (['cs'], {}), '(cs)\n', (703, 707), True, 'import numpy as np\n'), ((661, 676), 'numpy.flip', 'np.flip', (['signal'], {}), '(signal)\n', (668, 676), True, 'import numpy as np\n'), ((1516, 1540), 'ibidem.advent_of_code.util.get_input_name', 'get_input_name', (['(16)', '(2019)'], {}), '(16, 2019)\n', (1530, 1540), False, 'from ibidem.advent_of_code.util import get_input_name\n'), ((1748, 1772), 'ibidem.advent_of_code.util.get_input_name', 'get_input_name', (['(16)', '(2019)'], {}), '(16, 2019)\n', (1762, 1772), False, 'from ibidem.advent_of_code.util import get_input_name\n')] |
import pandas as pd
import os
import numpy as np
import math
import ast
sigma_list = [math.pow(2, i) for i in range(8)]


def _parse_column(frame, row, cast):
    """Parse the stringified Python list stored in ``frame['1'][row]``.

    Each cell holds the repr of a list (e.g. "[1, 2.0, True]"): strip the
    surrounding brackets, split on ", " and cast every token.
    """
    tokens = frame['1'][row].strip('][').split(', ')
    return np.asarray([cast(tok) for tok in tokens])


def _parse_bool(token):
    """Parse 'True'/'False' tokens.

    BUG FIX: the original used bool(token), which is True for ANY
    non-empty string -- including 'False'. Compare against the literal.
    """
    return token == 'True'


for sigma in sigma_list:
    test_case = 'mnist'
    # e.g. saved_results_1000/mnist1.0 (sigma zero-padded to width 3)
    file_name = os.path.join('saved_results_1000', test_case + str(sigma).zfill(3))
    file_name_sum = file_name + '_sum'
    # Note: the original also read file_name_sum here, but the value was
    # overwritten before use, so the dead read was removed.
    df = pd.read_csv(file_name, sep='\t')

    data_dict = {
        'deformed_labels': _parse_column(df, 0, int),
        'original_labels': _parse_column(df, 1, int),
        'norms': _parse_column(df, 2, float),
        'iterations': _parse_column(df, 3, int),
        'overshot': _parse_column(df, 4, _parse_bool),
        'same_label': _parse_column(df, 5, _parse_bool),
    }

    data_dict_sum = {
        'test_case': test_case,
        'sigma': sigma,
        'def_suc_rate': np.sum(data_dict['same_label']) / data_dict['same_label'].shape[0],
        'avg_iter': np.sum(data_dict['iterations']) / data_dict['iterations'].shape[0],
        'norm': np.sum(data_dict['norms']) / data_dict['norms'].shape[0],
    }

    pd.DataFrame.from_dict(data_dict).to_csv(file_name, sep='\t')
    # BUG FIX: DataFrame.from_dict raises "If using all scalar values, you
    # must pass an index" for a dict of scalars; build a one-row frame
    # from a single record instead.
    pd.DataFrame([data_dict_sum]).to_csv(file_name_sum, sep='\t')
| [
"math.pow",
"numpy.sum",
"pandas.DataFrame.from_dict",
"pandas.read_csv"
] | [((88, 102), 'math.pow', 'math.pow', (['(2)', 'i'], {}), '(2, i)\n', (96, 102), False, 'import math\n'), ((423, 455), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'sep': '"""\t"""'}), "(file_name, sep='\\t')\n", (434, 455), True, 'import pandas as pd\n'), ((468, 504), 'pandas.read_csv', 'pd.read_csv', (['file_name_sum'], {'sep': '"""\t"""'}), "(file_name_sum, sep='\\t')\n", (479, 504), True, 'import pandas as pd\n'), ((1534, 1567), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_dict'], {}), '(data_dict)\n', (1556, 1567), True, 'import pandas as pd\n'), ((1581, 1618), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_dict_sum'], {}), '(data_dict_sum)\n', (1603, 1618), True, 'import pandas as pd\n'), ((1278, 1309), 'numpy.sum', 'np.sum', (["data_dict['same_label']"], {}), "(data_dict['same_label'])\n", (1284, 1309), True, 'import numpy as np\n'), ((1375, 1406), 'numpy.sum', 'np.sum', (["data_dict['iterations']"], {}), "(data_dict['iterations'])\n", (1381, 1406), True, 'import numpy as np\n'), ((1468, 1494), 'numpy.sum', 'np.sum', (["data_dict['norms']"], {}), "(data_dict['norms'])\n", (1474, 1494), True, 'import numpy as np\n')] |
import argparse
import logging
import sys
import json
from pprint import pprint
from deepdiff import DeepDiff
from mungetout import convert
from mungetout import __version__
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "apache"
_logger = logging.getLogger(__name__)
def parse_args(args):
    """Turn *args* into a parsed namespace.

    Args:
        args ([str]): command line parameters as list of strings

    Returns:
        :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(description="Basic json diff")
    parser.add_argument(
        '--version', action='version',
        version='mungetout {ver}'.format(ver=__version__))
    parser.add_argument(
        'file', metavar='FILE', nargs=2,
        help='File to diff')
    parser.add_argument(
        '--filter-unique-fields', dest="unique",
        action='store_true', default=False,
        help="EXPERIMENTAL: Only compare fields that appear in both")
    # The two verbosity switches differ only in flag spelling and level.
    for flags, level, desc in (
            (('-v', '--verbose'), logging.INFO, "set loglevel to INFO"),
            (('-vv', '--very-verbose'), logging.DEBUG, "set loglevel to DEBUG")):
        parser.add_argument(
            *flags, dest="loglevel", action='store_const', const=level,
            help=desc)
    return parser.parse_args(args)
def setup_logging(loglevel):
    """Configure the root logger: messages at *loglevel* and above go to
    stderr with a timestamped ``level:name:message`` layout.

    Args:
        loglevel (int): minimum loglevel for emitting messages
    """
    logging.basicConfig(
        level=loglevel,
        stream=sys.stderr,
        format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
def main(args):
    """Main entry point allowing external calls.

    Loads both JSON files, normalises them with convert.clean (dropping
    benchmark and serial-number noise), optionally restricts the
    comparison to fields present in both, and pretty-prints the DeepDiff.

    Args:
        args ([str]): command line parameter list
    """
    args = parse_args(args)
    setup_logging(args.loglevel)
    with open(args.file[0]) as f1, open(args.file[1]) as f2:
        c1 = convert.clean(json.load(f1), filter_benchmarks=True,
                           filter_serials=True)
        c2 = convert.clean(json.load(f2), filter_benchmarks=True,
                           filter_serials=True)
        if args.unique:
            # x[1] element can be a disk or cpu id, x[3] is the value, so
            # only compare x[0] and x[2]. That way a difference in the
            # number of cpus or disks will still be shown.
            c1_keys = {(x[0], x[2]) for x in c1}
            c2_keys = {(x[0], x[2]) for x in c2}
            common_keys = c1_keys.intersection(c2_keys)
            c1 = [x for x in c1 if (x[0], x[2]) in common_keys]
            c2 = [x for x in c2 if (x[0], x[2]) in common_keys]
        # ignore_order: entry ordering in the cleaned lists is not significant.
        ddiff = DeepDiff(c1, c2, ignore_order=True)
        pprint(ddiff, indent=2)
def run():
    """Entry point for console_scripts

    Strips the program name from argv and delegates to main().
    """
    main(sys.argv[1:])
| [
"logging.getLogger",
"logging.basicConfig",
"deepdiff.DeepDiff",
"argparse.ArgumentParser",
"json.load",
"pprint.pprint"
] | [((258, 285), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (275, 285), False, 'import logging\n'), ((523, 577), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Basic json diff"""'}), "(description='Basic json diff')\n", (546, 577), False, 'import argparse\n'), ((1650, 1755), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'loglevel', 'stream': 'sys.stderr', 'format': 'logformat', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=loglevel, stream=sys.stderr, format=logformat,\n datefmt='%Y-%m-%d %H:%M:%S')\n", (1669, 1755), False, 'import logging\n'), ((2785, 2820), 'deepdiff.DeepDiff', 'DeepDiff', (['c1', 'c2'], {'ignore_order': '(True)'}), '(c1, c2, ignore_order=True)\n', (2793, 2820), False, 'from deepdiff import DeepDiff\n'), ((2829, 2852), 'pprint.pprint', 'pprint', (['ddiff'], {'indent': '(2)'}), '(ddiff, indent=2)\n', (2835, 2852), False, 'from pprint import pprint\n'), ((2058, 2071), 'json.load', 'json.load', (['f1'], {}), '(f1)\n', (2067, 2071), False, 'import json\n'), ((2172, 2185), 'json.load', 'json.load', (['f2'], {}), '(f2)\n', (2181, 2185), False, 'import json\n')] |
import pprint
from pathlib import Path
from typing import Optional
import typer
from embeddings.defaults import RESULTS_PATH
from embeddings.evaluator.sequence_labeling_evaluator import SequenceLabelingEvaluator
from embeddings.pipeline.flair_sequence_labeling import FlairSequenceLabelingPipeline
app = typer.Typer()
def run(
    # Typer builds CLI options from these annotated defaults.
    embedding_name: str = typer.Option(
        "allegro/herbert-base-cased", help="Hugging Face embedding model name or path."
    ),
    dataset_name: str = typer.Option(
        "clarin-pl/kpwr-ner", help="Hugging Face dataset name or path."
    ),
    input_column_name: str = typer.Option(
        "tokens", help="Column name that contains text to classify."
    ),
    target_column_name: str = typer.Option(
        "ner", help="Column name that contains tag labels for POS tagging."
    ),
    root: str = typer.Option(RESULTS_PATH.joinpath("pos_tagging")),
    hidden_size: int = typer.Option(256, help="Number of hidden states in RNN."),
    evaluation_mode: SequenceLabelingEvaluator.EvaluationMode = typer.Option(
        SequenceLabelingEvaluator.EvaluationMode.CONLL,
        help="Evaluation mode. Supported modes: [unit, conll, strict].",
    ),
    tagging_scheme: Optional[SequenceLabelingEvaluator.TaggingScheme] = typer.Option(
        None, help="Tagging scheme. Supported schemes: [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU]"
    ),
) -> None:
    """Run a Flair sequence-labeling pipeline and echo its result."""
    # Echo the effective configuration before starting.
    typer.echo(pprint.pformat(locals()))
    # Results land under <root>/<embedding>/<dataset>.
    output_path = Path(root, embedding_name, dataset_name)
    output_path.mkdir(parents=True, exist_ok=True)
    pipeline = FlairSequenceLabelingPipeline(
        embedding_name,
        dataset_name,
        input_column_name,
        target_column_name,
        output_path,
        hidden_size,
        evaluation_mode,
        tagging_scheme,
    )
    result = pipeline.run()
    typer.echo(pprint.pformat(result))


# NOTE(review): typer.run() executes at import time (no __main__ guard), and
# the module-level `app = typer.Typer()` above is never used -- confirm
# whether `app.command()` + a guarded `app()` was intended.
typer.run(run)
| [
"embeddings.defaults.RESULTS_PATH.joinpath",
"pathlib.Path",
"typer.Option",
"typer.Typer",
"pprint.pformat",
"embeddings.pipeline.flair_sequence_labeling.FlairSequenceLabelingPipeline",
"typer.run"
] | [((307, 320), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (318, 320), False, 'import typer\n'), ((1859, 1873), 'typer.run', 'typer.run', (['run'], {}), '(run)\n', (1868, 1873), False, 'import typer\n'), ((358, 456), 'typer.Option', 'typer.Option', (['"""allegro/herbert-base-cased"""'], {'help': '"""Hugging Face embedding model name or path."""'}), "('allegro/herbert-base-cased', help=\n 'Hugging Face embedding model name or path.')\n", (370, 456), False, 'import typer\n'), ((491, 568), 'typer.Option', 'typer.Option', (['"""clarin-pl/kpwr-ner"""'], {'help': '"""Hugging Face dataset name or path."""'}), "('clarin-pl/kpwr-ner', help='Hugging Face dataset name or path.')\n", (503, 568), False, 'import typer\n'), ((613, 687), 'typer.Option', 'typer.Option', (['"""tokens"""'], {'help': '"""Column name that contains text to classify."""'}), "('tokens', help='Column name that contains text to classify.')\n", (625, 687), False, 'import typer\n'), ((733, 819), 'typer.Option', 'typer.Option', (['"""ner"""'], {'help': '"""Column name that contains tag labels for POS tagging."""'}), "('ner', help=\n 'Column name that contains tag labels for POS tagging.')\n", (745, 819), False, 'import typer\n'), ((921, 978), 'typer.Option', 'typer.Option', (['(256)'], {'help': '"""Number of hidden states in RNN."""'}), "(256, help='Number of hidden states in RNN.')\n", (933, 978), False, 'import typer\n'), ((1044, 1174), 'typer.Option', 'typer.Option', (['SequenceLabelingEvaluator.EvaluationMode.CONLL'], {'help': '"""Evaluation mode. Supported modes: [unit, conll, strict]."""'}), "(SequenceLabelingEvaluator.EvaluationMode.CONLL, help=\n 'Evaluation mode. Supported modes: [unit, conll, strict].')\n", (1056, 1174), False, 'import typer\n'), ((1266, 1376), 'typer.Option', 'typer.Option', (['None'], {'help': '"""Tagging scheme. Supported schemes: [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU]"""'}), "(None, help=\n 'Tagging scheme. 
Supported schemes: [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU]'\n )\n", (1278, 1376), False, 'import typer\n'), ((1453, 1493), 'pathlib.Path', 'Path', (['root', 'embedding_name', 'dataset_name'], {}), '(root, embedding_name, dataset_name)\n', (1457, 1493), False, 'from pathlib import Path\n'), ((1561, 1726), 'embeddings.pipeline.flair_sequence_labeling.FlairSequenceLabelingPipeline', 'FlairSequenceLabelingPipeline', (['embedding_name', 'dataset_name', 'input_column_name', 'target_column_name', 'output_path', 'hidden_size', 'evaluation_mode', 'tagging_scheme'], {}), '(embedding_name, dataset_name,\n input_column_name, target_column_name, output_path, hidden_size,\n evaluation_mode, tagging_scheme)\n', (1590, 1726), False, 'from embeddings.pipeline.flair_sequence_labeling import FlairSequenceLabelingPipeline\n'), ((859, 895), 'embeddings.defaults.RESULTS_PATH.joinpath', 'RESULTS_PATH.joinpath', (['"""pos_tagging"""'], {}), "('pos_tagging')\n", (880, 895), False, 'from embeddings.defaults import RESULTS_PATH\n'), ((1833, 1855), 'pprint.pformat', 'pprint.pformat', (['result'], {}), '(result)\n', (1847, 1855), False, 'import pprint\n')] |