Dataset schema (one row per file):

| column | type | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4-245 |
| max_stars_repo_name | string | length 6-130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1-10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4-245 |
| max_issues_repo_name | string | length 6-130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1-10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4-245 |
| max_forks_repo_name | string | length 6-130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1-10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |

hexsha: 1c498df9a78bd33534951fe5b48a871a66008a16 | size: 740 | ext: py | lang: Python
path: static/generaMapas/generaCalendarioPolinico.py
repo: othesoluciones/TFM @ 8ed46985604c83c517612b38326b39a61b4cf102 | licenses: ["MIT"]
(stars/issues/forks metadata repeat the same path, repo, and head; all counts and event datetimes: null)
# Connect to the database
import base64
import json

import pandas as pd
from pymongo import MongoClient as Connection

# b64decode returns bytes on Python 3, so decode before concatenating
cadenaCon = ('mongodb://othesoluciones:'
             + base64.b64decode("b3RoZXNvbHVjaW9uZXM=").decode()
             + '@ds029635.mlab.com:29635/othesoluciones1')
MONGODB_URI = cadenaCon
conexion = Connection(MONGODB_URI)
db = conexion.othesoluciones1

# Pollen calendar (http://encuentralainspiracion.es/la-alergia-respiratoria/tipos-de-alergenos/alergia-al-polen/calendario-de-polinizacion/)
columnas = ['Mes', 'Nivel']  # month (1-12) and pollen level (0-2)
datos = [(1, 0), (2, 0), (3, 1), (4, 2), (5, 2), (6, 2), (7, 1), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0)]
df = pd.DataFrame(datos, columns=columnas)
# one JSON document per row: transpose, serialize, and take the values
recordsdf = list(json.loads(df.T.to_json()).values())
db.calendarioPolen.insert_many(recordsdf)
conexion.close()
avg_line_length: 35.238095 | max_line_length: 143 | alphanum_fraction: 0.758108
content_no_comment: the same script with the # comments stripped (verbatim duplicate, omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
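
(For completeness, a minimal read-back sketch; the placeholder credentials and the sort key are illustrative assumptions, not part of the original script.)

from pymongo import MongoClient

MONGODB_URI = "mongodb://<user>:<password>@ds029635.mlab.com:29635/othesoluciones1"  # same shape as the URI above
conexion = MongoClient(MONGODB_URI)
db = conexion.othesoluciones1
# one document per month; sort by 'Mes' and print the level
for doc in db.calendarioPolen.find({}, {"_id": 0}).sort("Mes", 1):
    print(doc["Mes"], doc["Nivel"])
conexion.close()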

hexsha: 1c498e29631e47183b4ba18522e66da6b0b16e5d | size: 1,139 | ext: py | lang: Python
path: build.py
repo: db4/conan-opencv @ c624fdc34452ba0206f9862c16c8e5b52f1738f9 | licenses: ["MIT"]
stars/issues counts and event datetimes: null | max_forks_count: 1 (fork events: 2018-09-14T10:18:35.000Z)
from conan.packager import ConanMultiPackager
import os

available_versions = ["3.1.0", "3.4.0"]

def main():
    """Build the OpenCV Conan package matrix."""
    builder = ConanMultiPackager(build_policy="outdated")
    if "CONAN_REFERENCE" in os.environ:
        builder.add_common_builds(shared_option_name="OpenCV:shared", pure_c=False)
    else:
        for version in available_versions:
            builder.add_common_builds(shared_option_name="OpenCV:shared", pure_c=False, reference="OpenCV/%s@%s/%s" %
                                      (version, os.environ["CONAN_USERNAME"], os.environ["CONAN_CHANNEL"]))
    # Shared builds are only produced with IPP/TBB enabled; static builds get both variants.
    filtered_builds = []
    for settings, options, env_vars, build_requires in builder.builds:
        with_ipp_tbb_list = [True] if options['OpenCV:shared'] else [True, False]
        for with_ipp_tbb in with_ipp_tbb_list:
            opts = dict(options)
            opts['OpenCV:with_ipp'] = with_ipp_tbb
            opts['OpenCV:with_tbb'] = with_ipp_tbb
            filtered_builds.append([settings, opts, env_vars, build_requires])
    builder.builds = filtered_builds
    builder.run()

if __name__ == "__main__":
    main()
avg_line_length: 35.59375 | max_line_length: 117 | alphanum_fraction: 0.656716
content_no_comment: the same script with the docstring stripped (verbatim duplicate, omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
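
(The shared/static filtering in build.py is the non-obvious part; below is a self-contained toy version of the same pattern, with invented names, independent of Conan.)

base_builds = [{"shared": True}, {"shared": False}]
filtered = []
for opts in base_builds:
    # shared builds only get the IPP/TBB-enabled variant; static builds get both
    choices = [True] if opts["shared"] else [True, False]
    for flag in choices:
        variant = dict(opts)
        variant["with_ipp"] = variant["with_tbb"] = flag
        filtered.append(variant)
print(filtered)  # three variants: shared+on, static+on, static+off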

hexsha: 1c498e2f7add5e8cba6aa3ffe578938129f969b3 | size: 507 | ext: py | lang: Python
path: {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py
stars repo: romnn/cookiecutter-pypackage @ 469228de74a6cd0a8065270ff7c930d016e2f045 | licenses: ["BSD-3-Clause"] | max_stars_count: 1 (star events: 2021-01-30T04:10:24.000Z)
issues/forks repo: romnnn/cookiecutter-pypackage (same head and licenses) | counts and event datetimes: null
# -*- coding: utf-8 -*-
"""Console script for {{cookiecutter.project_slug}}."""
import sys
import typing
import click
@click.command()
def main(args: typing.Optional[str] = None) -> int:
"""Console script for {{cookiecutter.project_slug}}."""
click.echo("Replace this message by putting your code into {{cookiecutter.project_slug}}.cli.main")
click.echo("See click documentation at http://click.pocoo.org/")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
avg_line_length: 25.35 | max_line_length: 103 | alphanum_fraction: 0.682446
content_no_comment: the same script with the coding cookie, docstrings, and "# pragma: no cover" stripped (verbatim duplicate, omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
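
(A command like this can be exercised with click's built-in test runner; the import path below is hypothetical and depends on the rendered project_slug.)

from click.testing import CliRunner
from my_project.cli import main  # hypothetical rendered module path

runner = CliRunner()
result = runner.invoke(main, [])
assert result.exit_code == 0
print(result.output)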

hexsha: 1c49903e1f2ffea5052f9e556c6d9cd76d45ad77 | size: 80,689 | ext: py | lang: Python
path: selfdrive/car/toyota/values.py
repo: mohammedx49/ArnePilot01 @ 81af1abadc9a9d572919bafeeb698f2a989d363b | licenses: ["MIT"]
(stars/issues/forks metadata repeat the same path, repo, and head; all counts and event datetimes: null)
# flake8: noqa
from selfdrive.car import dbc_dict
from cereal import car
Ecu = car.CarParams.Ecu
# Steer torque limits
class SteerLimitParams:
STEER_MAX = 1500
STEER_DELTA_UP = 10 # 1.5s time to peak torque
STEER_DELTA_DOWN = 44 # always lower than 45 otherwise the Rav4 faults (Prius seems ok with 50)
STEER_ERROR_MAX = 350 # max delta between torque cmd and torque motor
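
# Illustrative sketch only (not this repo's actual control code): limits like
# these are typically applied by clamping the command and rate-limiting the
# change per control step.
def apply_steer_limits(desired_torque, last_torque, params=SteerLimitParams):
    desired_torque = max(-params.STEER_MAX, min(params.STEER_MAX, desired_torque))
    if desired_torque > last_torque:
        # ramp up slowly toward the target
        return min(desired_torque, last_torque + params.STEER_DELTA_UP)
    # release faster, but below the EPS fault threshold
    return max(desired_torque, last_torque - params.STEER_DELTA_DOWN)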
class CAR:
PRIUS = "TOYOTA PRIUS 2017"
PRIUS_2019 = "TOYOTA PRIUS 2019"
RAV4H = "TOYOTA RAV4 HYBRID 2017"
RAV4 = "TOYOTA RAV4 2017"
COROLLA = "TOYOTA COROLLA 2017"
COROLLA_2015 = "TOYOTA COROLLA 2015"
LEXUS_RX = "LEXUS RX 350 2017"
LEXUS_RXH = "LEXUS RX HYBRID 2017"
LEXUS_RX_TSS2 = "LEXUS RX350 2020"
LEXUS_RXH_TSS2 = "LEXUS RX450 HYBRID 2020"
CHR = "TOYOTA C-HR 2018"
CHRH = "TOYOTA C-HR HYBRID 2018"
CAMRY = "TOYOTA CAMRY 2018"
CAMRYH = "TOYOTA CAMRY HYBRID 2018"
HIGHLANDER = "TOYOTA HIGHLANDER 2017"
HIGHLANDER_TSS2 = "TOYOTA HIGHLANDER 2020"
HIGHLANDERH = "TOYOTA HIGHLANDER HYBRID 2018"
HIGHLANDERH_TSS2 = "TOYOTA HIGHLANDER HYBRID 2020"
AVALON = "TOYOTA AVALON 2016"
RAV4_TSS2 = "TOYOTA RAV4 2019"
COROLLA_TSS2 = "TOYOTA COROLLA TSS2 2019"
COROLLAH_TSS2 = "TOYOTA COROLLA HYBRID TSS2 2019"
LEXUS_ES_TSS2 = "LEXUS ES 2019"
LEXUS_ESH_TSS2 = "LEXUS ES 300H 2019"
SIENNA = "TOYOTA SIENNA XLE 2018"
LEXUS_IS = "LEXUS IS300 2018"
LEXUS_CTH = "LEXUS CT 200H 2018"
RAV4H_TSS2 = "TOYOTA RAV4 HYBRID 2019"
LEXUS_ISH = "LEXUS IS HYBRID 2017"
LEXUS_NXH = "LEXUS NX300H 2018"
LEXUS_UXH_TSS2 = "LEXUS UX 250H 2019"
class ECU:
CAM = Ecu.fwdCamera # camera
DSU = Ecu.dsu # driving support unit
APGS = Ecu.apgs # advanced parking guidance system
SMART = Ecu.unknown
# addr: (ecu, cars, bus, 1/freq*100, vl)
STATIC_MSGS = [
(0x130, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 100, b'\x00\x00\x00\x00\x00\x00\x38'),
(0x240, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x241, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x244, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x245, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x10\x01\x00\x10\x01\x00'),
(0x248, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 1, 5, b'\x00\x00\x00\x00\x00\x00\x01'),
(0x367, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 40, b'\x06\x00'),
(0x414, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x00\x00\x00\x00\x00\x00\x17\x00'),
(0x466, Ecu.fwdCamera, (CAR.COROLLA, CAR.COROLLA_2015), 1, 100, b'\x24\x20\xB1'),
(0x489, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x00\x00\x00\x00\x00\x00\x00'),
(0x48a, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x00\x00\x00\x00\x00\x00\x00'),
(0x48b, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015, CAR.HIGHLANDER, CAR.HIGHLANDERH), 0, 100, b'\x66\x06\x08\x0a\x02\x00\x00\x00'),
(0x4d3, Ecu.fwdCamera, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.RAV4, CAR.COROLLA, CAR.COROLLA_2015), 0, 100, b'\x1C\x00\x00\x01\x00\x00\x00\x00'),
(0x128, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.AVALON), 1, 3, b'\xf4\x01\x90\x83\x00\x37'),
(0x128, Ecu.dsu, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.SIENNA, CAR.LEXUS_CTH), 1, 3, b'\x03\x00\x20\x00\x00\x52'),
(0x141, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 1, 2, b'\x00\x00\x00\x46'),
(0x160, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 1, 7, b'\x00\x00\x08\x12\x01\x31\x9c\x51'),
(0x161, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.AVALON, CAR.LEXUS_RX), 1, 7, b'\x00\x1e\x00\x00\x00\x80\x07'),
  (0x161, Ecu.dsu, (CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_CTH), 1,   7, b'\x00\x1e\x00\xd4\x00\x00\x5b'),
(0x283, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 3, b'\x00\x00\x00\x00\x00\x00\x8c'),
(0x2E6, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xff\xf8\x00\x08\x7f\xe0\x00\x4e'),
(0x2E7, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xa8\x9c\x31\x9c\x00\x00\x00\x02'),
(0x33E, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH), 0, 20, b'\x0f\xff\x26\x40\x00\x1f\x00'),
(0x2E6, Ecu.unknown, (CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xff\xf8\x00\x08\x7f\xe0\x00\x4e'),
(0x2E7, Ecu.unknown, (CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xa8\x9c\x31\x9c\x00\x00\x00\x02'),
(0x33E, Ecu.unknown, (CAR.RAV4H, CAR.LEXUS_RXH), 0, 20, b'\x0f\xff\x26\x40\x00\x1f\x00'),
(0x344, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 5, b'\x00\x00\x01\x00\x00\x00\x00\x50'),
(0x365, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.HIGHLANDERH), 0, 20, b'\x00\x00\x00\x80\x03\x00\x08'),
(0x365, Ecu.dsu, (CAR.RAV4, CAR.RAV4H, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 20, b'\x00\x00\x00\x80\xfc\x00\x08'),
(0x366, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.HIGHLANDERH), 0, 20, b'\x00\x00\x4d\x82\x40\x02\x00'),
(0x366, Ecu.dsu, (CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 20, b'\x00\x72\x07\xff\x09\xfe\x00'),
(0x470, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.LEXUS_RXH), 1, 100, b'\x00\x00\x02\x7a'),
(0x470, Ecu.dsu, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.RAV4H, CAR.SIENNA, CAR.LEXUS_CTH), 1, 100, b'\x00\x00\x01\x79'),
(0x4CB, Ecu.dsu, (CAR.PRIUS, CAR.PRIUS_2019, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_RX), 0, 100, b'\x0c\x00\x00\x00\x00\x00\x00\x00'),
]
ECU_FINGERPRINT = {
Ecu.fwdCamera: [0x2e4], # steer torque cmd
Ecu.dsu: [0x283], # accel cmd
}
FINGERPRINTS = {
CAR.RAV4: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767:4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8}
],
CAR.RAV4H: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 218: 8, 296: 8, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 515: 3, 547: 8, 548: 8, 550: 8, 552: 4, 560: 7, 562: 4, 581: 5, 608: 8, 610: 5, 643: 7, 705: 8, 713: 8, 725: 2, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 830: 7, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1207: 8, 1212: 8, 1227: 8, 1228: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1792: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1984: 8, 1990: 8, 1992: 8, 1998: 8, 2016: 8, 2018: 8, 2019: 8, 2022: 8, 2024: 8, 2026: 8}
],
CAR.PRIUS: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 512: 6, 513: 6, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 814: 8, 824: 2, 825: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 861: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 875: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1130: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1681: 8, 1767:4, 1777: 8, 1779: 8, 1792: 8, 1840: 8, 1863:8, 1872: 8, 1904: 8, 1912: 8, 1941: 8, 1949: 8, 1952: 8, 1960: 8, 1984: 8, 1988: 8, 1990: 8, 1992: 8, 1996:8, 1998: 8, 2004: 8, 2010: 8, 2012: 8, 2015: 8, 2016: 8, 2018: 8, 2024: 8, 2026: 8, 2027: 8, 2029: 8, 2030: 8, 2031: 8}
],
CAR.PRIUS_2019: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 814: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2002: 8, 2010: 8}
],
CAR.COROLLA: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 767:4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 2, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1196: 8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2016: 8, 2017: 8, 2018: 8, 2019: 8, 2020: 8, 2021: 8, 2022: 8, 2023: 8, 2024: 8}
],
CAR.COROLLA_2015: [
{32: 4, 36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 456: 8, 464: 8, 466: 8, 467: 8, 513: 6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 611: 7, 705: 8, 800: 8, 849: 4, 852: 1, 865: 8, 896: 8, 897: 8, 898: 8, 899: 8, 900: 6, 902: 6, 903: 8, 905: 8, 906: 5, 910: 8, 911: 8, 916: 2, 921: 8, 928: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 976: 1, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1024: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1078: 8, 1079: 8, 1088: 8, 1090: 8, 1091: 8, 1196: 8, 1217: 8, 1219: 8, 1222: 8, 1224: 8, 1244: 8, 1245: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1560: 8, 1561: 8, 1562: 8, 1564: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1574: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1761: 8, 1762: 8}
],
CAR.LEXUS_RX: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 5, 643: 7, 658: 8, 705: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 812: 3, 814: 8, 818: 8, 819: 8, 820: 8, 821: 8, 822: 8, 830: 7, 835: 8, 836: 8, 845: 5, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1349: 8, 1350: 8, 1351: 8, 1413: 8, 1414: 8, 1415: 8, 1416: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1595: 8, 1777: 8, 1779: 8, 1792: 8, 1800: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_RXH: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 512: 6, 513: 6, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 5, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 744: 8, 767: 4, 800: 8, 810: 2, 812: 3, 814: 8, 818: 8, 819: 8, 820: 8, 821: 8, 822: 8, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 869: 7, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 6, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1349: 8, 1350: 8, 1351: 8, 1413: 8, 1414: 8, 1415: 8, 1416: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1745: 8, 1777: 8, 1779: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1840: 8, 1848: 8, 1904: 8, 1912: 8, 1940: 8, 1941: 8, 1948: 8, 1949: 8, 1952: 8, 1956: 8, 1960: 8, 1964: 8, 1986: 8, 1990: 8, 1994: 8, 1998: 8, 2004: 8, 2012: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_RX_TSS2: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 740: 5, 742: 8, 743: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1076: 8, 1077: 8,1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594:8, 1595: 8, 1600: 8, 1649: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1816: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.CHR: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 705: 8, 740: 5, 767:4, 800: 8, 810: 2, 812: 8, 814: 8, 830: 7, 835: 8, 836: 8, 845: 5, 869: 7, 870: 7, 871: 2, 898: 8, 913: 8, 918: 8, 921: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 1014: 8, 1017: 8, 1020: 8, 1021: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1082: 8, 1083: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8}
],
CAR.CHRH: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 767:4, 800: 8, 810: 2, 812: 8, 814: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1021: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8, 2026: 8, 2030: 8}
],
CAR.CAMRY: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 513: 6, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1767: 4, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1808: 8, 1816: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1956: 8, 1961: 8, 1964: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.CAMRYH: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1872: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.HIGHLANDER: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 562: 6, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1585: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1984: 8, 1988: 8, 1990: 8, 1992: 8, 1996: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.HIGHLANDER_TSS2: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 355: 5, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 565: 8, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 765: 8, 767:4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1816: 8, 1904: 8, 1912: 8, 1952: 8, 1960: 8, 1990: 8, 1998: 8}
],
CAR.HIGHLANDERH: [
{36: 8, 37: 8, 170: 8, 180: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.HIGHLANDERH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1263: 8, 1264: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1745: 8, 1775: 8, 1779: 8}
],
CAR.AVALON: [
{36: 8, 37: 8, 170: 8, 180: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 547: 8, 550: 8, 552: 4, 562: 6, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 767:4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 905: 8, 911: 1, 916: 2, 921: 8, 933: 6, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 1005: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1206: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1558: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.RAV4_TSS2: [
{36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 355: 5, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 565: 8, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1063: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1600: 8, 1649: 8, 1696: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1872: 8, 1880:8 , 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.COROLLA_TSS2: [
{36: 8, 37: 8, 114: 5, 170: 8, 180: 8, 186: 4, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 705: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 767:4, 800: 8, 810: 2, 812: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1809: 8, 1816: 8, 1817: 8, 1840: 8, 1848: 8, 1904: 8, 1912: 8, 1940: 8, 1941: 8, 1948: 8, 1949: 8, 1952: 8, 1960: 8, 1981: 8, 1986: 8, 1990: 8, 1994: 8, 1998: 8, 2004: 8}
],
CAR.COROLLAH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 7, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1112: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1600: 8, 1649: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_ES_TSS2: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 401: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 765: 8, 767:4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 987: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_ESH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 744: 8, 761: 8, 764: 8, 765: 8, 767:4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.SIENNA: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 548: 8, 550: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 764: 8, 800: 8, 824: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 888: 8, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 918: 7, 921: 8, 933: 8, 944: 6, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1212: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1656: 8, 1664: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_IS: [
{36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 400: 6, 426: 6, 452: 8, 464: 8, 466: 8, 467: 5, 544: 4, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 738: 2, 740: 5, 744: 8, 800: 8, 815: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 914: 2, 916: 3, 917: 5, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1009: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1168: 1, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1184: 8, 1185: 8, 1186: 8, 1187: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1193: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1206: 8, 1208: 8, 1212: 8, 1220: 8, 1226: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1590: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1648: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_ISH: [
{36: 8, 37: 8, 170: 8, 180: 8, 295: 8, 296: 8, 400: 6, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 800: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 916: 3, 918: 7, 921: 7, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1009: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1168: 1, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1187: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1208: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_CTH: [
{36: 8, 37: 8, 170: 8, 180: 8, 288: 8, 426: 6, 452: 8, 466: 8, 467: 8, 548: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 800: 8, 810: 2, 832: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 921: 8, 933: 8, 944: 6, 945: 8, 950: 8, 951: 8, 953: 3, 955: 4, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1116: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1190: 8, 1191: 8, 1192: 8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1558: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.RAV4H_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1952: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_NXH: [
{36: 8, 37: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 800: 8, 810: 2, 812: 3, 818: 8, 822: 8, 824: 8, 835: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 889: 8, 891: 8, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 916: 3, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 987: 8, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1006: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1168: 1, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1195: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1208: 8, 1212: 8, 1227: 8, 1228: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_UXH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1810: 8, 1813: 8, 1814: 8, 1816: 8, 1818: 8, 1821: 8, 1822: 8, 1840: 8, 1848: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1940: 8, 1941: 8, 1945: 8, 1948: 8, 1949: 8, 1952: 8, 1953: 8, 1956: 8, 1960: 8, 1961: 8, 1964: 8, 1968: 8, 1976: 8, 1986: 8, 1990: 8, 1994: 8, 1998: 8, 2004: 8, 2012: 8, 2015: 8, 2016: 8, 2024: 8}
],
}
# Don't use these fingerprints for fingerprinting; they are still needed for ECU detection
IGNORED_FINGERPRINTS = [CAR.LEXUS_RXH_TSS2]
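
# Illustrative sketch only (not this repo's fingerprinting code): a car is a
# candidate when every observed CAN (address -> payload length) pair agrees
# with one of its fingerprint dicts.
def candidate_cars(observed):
    matches = []
    for car, fps in FINGERPRINTS.items():
        if car in IGNORED_FINGERPRINTS:
            continue  # kept only for ECU detection, per the note above
        if any(all(fp.get(addr) == n for addr, n in observed.items()) for fp in fps):
            matches.append(car)
    return matches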
FW_VERSIONS = {
CAR.AVALON: {
(Ecu.esp, 0x7b0, None): [b'F152607060\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510705200\x00\x00\x00\x00',
b'881510701300\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [b'8965B41051\x00\x00\x00\x00\x00\x00'],
(Ecu.engine, 0x7e0, None): [
b'\x0230721100\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230721200\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0701100\x00\x00\x00\x00',
b'8646F0703000\x00\x00\x00\x00',
],
},
CAR.CAMRY: {
(Ecu.engine, 0x700, None): [
b'\x018966306L3100\x00\x00\x00\x00',
b'\x018966306L4200\x00\x00\x00\x00',
b'\x018966306L5200\x00\x00\x00\x00',
b'\x018966306Q3100\x00\x00\x00\x00',
b'\x018966306Q4000\x00\x00\x00\x00',
b'\x018966306Q4100\x00\x00\x00\x00',
b'\x018966333P3100\x00\x00\x00\x00',
b'\x018966333P3200\x00\x00\x00\x00',
b'\x018966333P4200\x00\x00\x00\x00',
b'\x018966333P4300\x00\x00\x00\x00',
b'\x018966333P4400\x00\x00\x00\x00',
b'\x018966333P4500\x00\x00\x00\x00',
b'\x018966333P4700\x00\x00\x00\x00',
b'\x018966333Q6000\x00\x00\x00\x00',
b'\x018966333Q6200\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603300 ',
b'8821F0607200 ',
b'8821F0608000 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152606210\x00\x00\x00\x00\x00\x00',
b'F152606230\x00\x00\x00\x00\x00\x00',
b'F152606290\x00\x00\x00\x00\x00\x00',
b'F152633540\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603300 ',
b'8821F0607200 ',
b'8821F0608000 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0603400 ',
b'8646F0605000 ',
b'8646F0606000 ',
],
},
CAR.CAMRYH: {
(Ecu.engine, 0x700, None): [
b'\x018966333N4300\x00\x00\x00\x00',
b'\x018966333X0000\x00\x00\x00\x00',
b'\x028966306B2100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306B2300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8200\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8400\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S1100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633214\x00\x00\x00\x00\x00\x00',
b'F152633660\x00\x00\x00\x00\x00\x00',
b'F152633712\x00\x00\x00\x00\x00\x00',
b'F152633713\x00\x00\x00\x00\x00\x00',
b'F152633B51\x00\x00\x00\x00\x00\x00',
b'F152633B60\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33550\x00\x00\x00\x00\x00\x00',
b'8965B33551\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33611\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603500 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607000 ',
b'8646F0607100 ',
],
},
CAR.CHR: {
(Ecu.engine, 0x700, None): [
b'\x01896631017100\x00\x00\x00\x00',
b'\x01896631017200\x00\x00\x00\x00',
b'\x0189663F413100\x00\x00\x00\x00',
b'\x0189663F414100\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821F0W01100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152610020\x00\x00\x00\x00\x00\x00',
b'F152610153\x00\x00\x00\x00\x00\x00',
b'F1526F4034\x00\x00\x00\x00\x00\x00',
b'F1526F4044\x00\x00\x00\x00\x00\x00',
b'F1526F4073\x00\x00\x00\x00\x00\x00',
b'F1526F4122\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x033F401100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203102\x00\x00\x00\x00',
b'\x033F424000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821F0W01100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF401800 ',
b'8646FF404000 ',
b'8646FF406000 ',
],
},
CAR.CHRH: {
(Ecu.engine, 0x700, None): [
b'\x0289663F423000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F431000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0189663F438000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152610040\x00\x00\x00\x00\x00\x00',
b'F152610190\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821FF404000 ',
b'8821FF407100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10050\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821FF404000 ',
b'8821FF407100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF404000 ',
b'8646FF407000 ',
],
},
CAR.COROLLA: {
(Ecu.engine, 0x7e0, None): [
b'\x01896630E88000\x00\x00\x00\x00',
b'\x0230ZC2000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0330ZC1200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510201100\x00\x00\x00\x00',
b'881510201200\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602190\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B02181\x00\x00\x00\x00\x00\x00',
b'8965B02191\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0201101\x00\x00\x00\x00',
b'8646F0201200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.COROLLA_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZG5000\x00\x00\x00\x00',
b'\x01896630ZG5100\x00\x00\x00\x00',
b'\x01896630ZG5200\x00\x00\x00\x00',
b'\x01896630ZG5300\x00\x00\x00\x00',
b'\x01896630ZQ5000\x00\x00\x00\x00',
b'\x018966312L8000\x00\x00\x00\x00',
b'\x018966312P9000\x00\x00\x00\x00',
b'\x018966312P9100\x00\x00\x00\x00',
b'\x018966312P9200\x00\x00\x00\x00',
b'\x018966312R0100\x00\x00\x00\x00',
b'\x018966312R1000\x00\x00\x00\x00',
b'\x018966312R1100\x00\x00\x00\x00',
b'\x018966312R3100\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602191\x00\x00\x00\x00\x00\x00',
b'\x01F152602280\x00\x00\x00\x00\x00\x00',
b'\x01F152602560\x00\x00\x00\x00\x00\x00',
b'\x01F152612641\x00\x00\x00\x00\x00\x00',
b'\x01F152612651\x00\x00\x00\x00\x00\x00',
b'\x01F152612B10\x00\x00\x00\x00\x00\x00',
b'\x01F152612B60\x00\x00\x00\x00\x00\x00',
b'\x01F152612B61\x00\x00\x00\x00\x00\x00',
b'\x01F152612B90\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.COROLLAH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZJ1000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x02896630ZQ3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZR2000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q4000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966312L7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1205001\x00\x00\x00\x00',
b'\x038966312N1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896630ZN8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'8965B12451\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152612A00\x00\x00\x00\x00\x00\x00',
b'F152612590\x00\x00\x00\x00\x00\x00',
b'F152612691\x00\x00\x00\x00\x00\x00',
b'F152612692\x00\x00\x00\x00\x00\x00',
b'F152612700\x00\x00\x00\x00\x00\x00',
b'F152612800\x00\x00\x00\x00\x00\x00',
b'F152612840\x00\x00\x00\x00\x00\x00',
b'F152612A10\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152612820\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER: {
(Ecu.engine, 0x700, None): [
b'\x01896630E09000\x00\x00\x00\x00',
b'\x01896630E43100\x00\x00\x00\x00',
b'\x01896630E43200\x00\x00\x00\x00',
b'\x01896630E44200\x00\x00\x00\x00',
b'\x01896630E45000\x00\x00\x00\x00',
b'\x01896630E45100\x00\x00\x00\x00',
b'\x01896630E45200\x00\x00\x00\x00',
b'\x01896630E74000\x00\x00\x00\x00',
b'\x01896630E76000\x00\x00\x00\x00',
b'\x01896630E83000\x00\x00\x00\x00',
b'\x01896630E84000\x00\x00\x00\x00',
b'\x01896630E85000\x00\x00\x00\x00',
b'\x01896630E88000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48140\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
b'8965B48210\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F15260E011\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510E01100\x00\x00\x00\x00',
b'881510E01200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH: {
(Ecu.eps, 0x7a1, None): [
b'8965B48160\x00\x00\x00\x00\x00\x00'
],
(Ecu.esp, 0x7b0, None): [
b'F152648541\x00\x00\x00\x00\x00\x00',
b'F152648542\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230E40000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E051\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630E64100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15264872300\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x02896630E66000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F0E02100\x00\x00\x00\x00!!!!!!!!!!!!!!!!',
],
},
CAR.LEXUS_IS: {
(Ecu.engine, 0x700, None): [
b'\x018966353M7100\x00\x00\x00\x00',
b'\x018966353Q2300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F152653330\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881515306400\x00\x00\x00\x00',
b'881515306500\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [b'8965B53271\x00\x00\x00\x00\x00\x00'],
(Ecu.fwdRadar, 0x750, 0xf): [b'8821F4702300\x00\x00\x00\x00'],
(Ecu.fwdCamera, 0x750, 0x6d): [b'8646F5301400\x00\x00\x00\x00'],
},
CAR.PRIUS: {
(Ecu.engine, 0x700, None): [
b'\x02896634761000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634782000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634784000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x03896634759100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634759300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701002\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703001\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634768100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634789000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707001\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x038966347B7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47021\x00\x00\x00\x00\x00\x00',
b'8965B47022\x00\x00\x00\x00\x00\x00',
b'8965B47023\x00\x00\x00\x00\x00\x00',
b'8965B47050\x00\x00\x00\x00\x00\x00',
b'8965B47060\x00\x00\x00\x00\x00\x00', # This is the EPS with good angle sensor
],
(Ecu.esp, 0x7b0, None): [
b'F152647290\x00\x00\x00\x00\x00\x00',
b'F152647300\x00\x00\x00\x00\x00\x00',
b'F152647310\x00\x00\x00\x00\x00\x00',
b'F152647414\x00\x00\x00\x00\x00\x00',
b'F152647415\x00\x00\x00\x00\x00\x00',
b'F152647416\x00\x00\x00\x00\x00\x00',
b'F152647417\x00\x00\x00\x00\x00\x00',
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647490\x00\x00\x00\x00\x00\x00',
b'F152647684\x00\x00\x00\x00\x00\x00',
b'F152647862\x00\x00\x00\x00\x00\x00',
b'F152647863\x00\x00\x00\x00\x00\x00',
b'F152647864\x00\x00\x00\x00\x00\x00',
b'F152647865\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514702300\x00\x00\x00\x00',
b'881514703100\x00\x00\x00\x00',
b'881514704100\x00\x00\x00\x00',
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4701300\x00\x00\x00\x00',
b'8646F4702001\x00\x00\x00\x00',
b'8646F4702100\x00\x00\x00\x00',
b'8646F4702200\x00\x00\x00\x00',
b'8646F4705000\x00\x00\x00\x00',
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.PRIUS_2019: {
(Ecu.engine, 0x700, None): [
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47060\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647290\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.RAV4: {
(Ecu.engine, 0x7e0, None): [
b'\x02342Q1000\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1100\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1200\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1300\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2100\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2200\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q4000\x00\x00\x00\x00\x00\x00\x00\x0054215000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42082\x00\x00\x00\x00\x00\x00',
b'8965B42083\x00\x00\x00\x00\x00\x00',
b'8965B42063\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F15260R102\x00\x00\x00\x00\x00\x00',
b'F15260R103\x00\x00\x00\x00\x00\x00',
b'F152642493\x00\x00\x00\x00\x00\x00',
b'F152642492\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514201200\x00\x00\x00\x00',
b'881514201300\x00\x00\x00\x00',
b'881514201400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
],
},
CAR.RAV4H: {
(Ecu.engine, 0x7e0, None): [
b'\x02342N9000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342N9100\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342P0000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42103\x00\x00\x00\x00\x00\x00',
b'8965B42162\x00\x00\x00\x00\x00\x00',
b'8965B42163\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642090\x00\x00\x00\x00\x00\x00',
b'F152642120\x00\x00\x00\x00\x00\x00',
b'F152642400\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514202200\x00\x00\x00\x00',
b'881514202300\x00\x00\x00\x00',
b'881514202400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201100\x00\x00\x00\x00',
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630R58000\x00\x00\x00\x00',
b'\x018966342E2000\x00\x00\x00\x00',
b'\x018966342M8000\x00\x00\x00\x00',
b'\x018966342T1000\x00\x00\x00\x00',
b'\x018966342T6000\x00\x00\x00\x00',
b'\x018966342T9000\x00\x00\x00\x00',
b'\x018966342U4000\x00\x00\x00\x00',
b'\x018966342V3100\x00\x00\x00\x00',
b'\x018966342V3200\x00\x00\x00\x00',
b'\x018966342X5000\x00\x00\x00\x00',
b'\x01896634A05000\x00\x00\x00\x00',
b'\x01896634A19000\x00\x00\x00\x00',
b'\x01896634A19100\x00\x00\x00\x00',
b'\x01896634A20000\x00\x00\x00\x00',
b'\x01896634A22000\x00\x00\x00\x00',
b'\x028966342T0000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342Y8000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642520\x00\x00\x00\x00\x00\x00',
b'\x01F15260R210\x00\x00\x00\x00\x00\x00',
b'\x01F15260R220\x00\x00\x00\x00\x00\x00',
b'\x01F15260R300\x00\x00\x00\x00\x00\x00',
b'\x01F152642551\x00\x00\x00\x00\x00\x00',
b'\x01F152642561\x00\x00\x00\x00\x00\x00',
b'\x01F152642700\x00\x00\x00\x00\x00\x00',
b'\x01F152642710\x00\x00\x00\x00\x00\x00',
b'\x01F152642750\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.RAV4H_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966342W8000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x018966342X6000\x00\x00\x00\x00',
b'\x028966342W4001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A23001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642541\x00\x00\x00\x00\x00\x00',
b'F152642291\x00\x00\x00\x00\x00\x00',
b'F152642330\x00\x00\x00\x00\x00\x00',
b'F152642531\x00\x00\x00\x00\x00\x00',
b'F152642532\x00\x00\x00\x00\x00\x00',
b'F152642521\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_ES_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966333T5100\x00\x00\x00\x00',
b'\x018966333T5000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'\x01F152606281\x00\x00\x00\x00\x00\x00'],
(Ecu.eps, 0x7a1, None): [b'8965B33252\x00\x00\x00\x00\x00\x00'],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F3303200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
],
},
CAR.SIENNA: {
(Ecu.engine, 0x700, None): [
b'\x01896630832100\x00\x00\x00\x00',
b'\x01896630838000\x00\x00\x00\x00',
b'\x01896630838100\x00\x00\x00\x00',
b'\x01896630842000\x00\x00\x00\x00',
b'\x01896630851000\x00\x00\x00\x00',
b'\x01896630851100\x00\x00\x00\x00',
b'\x01896630852100\x00\x00\x00\x00',
b'\x01896630859000\x00\x00\x00\x00',
b'\x01896630860000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B45070\x00\x00\x00\x00\x00\x00',
b'8965B45082\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152608130\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510801100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702200\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966333S8000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966333V4000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633423\x00\x00\x00\x00\x00\x00',
b'F152633680\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_NXH: {
(Ecu.engine, 0x7e0, None): [
b'\x0237882000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237841000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678160\x00\x00\x00\x00\x00\x00',
b'F152678170\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517804300\x00\x00\x00\x00',
b'881517804100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78100\x00\x00\x00\x00\x00\x00',
b'8965B78060\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801300\x00\x00\x00\x00',
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX: {
(Ecu.engine, 0x700, None): [
b'\x01896630E37200\x00\x00\x00\x00',
b'\x01896630E41000\x00\x00\x00\x00',
b'\x01896630E41200\x00\x00\x00\x00',
b'\x01896630E37300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648472\x00\x00\x00\x00\x00\x00',
b'F152648473\x00\x00\x00\x00\x00\x00',
b'F152648492\x00\x00\x00\x00\x00\x00',
b'F152648493\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514810300\x00\x00\x00\x00',
b'881514810500\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801100\x00\x00\x00\x00',
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH: {
(Ecu.engine, 0x7e0, None): [
b'\x02348N0000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T1100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348V6000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Z3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648361\x00\x00\x00\x00\x00\x00',
b'F152648501\x00\x00\x00\x00\x00\x00',
b'F152648502\x00\x00\x00\x00\x00\x00',
b'F152648504\x00\x00\x00\x00\x00\x00',
b'F152648A30\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514811300\x00\x00\x00\x00',
b'881514811500\x00\x00\x00\x00',
b'881514811700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EB0000\x00\x00\x00\x00',
b'\x01896630EA9000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH_TSS2: {
(Ecu.engine, 0x7e0, None): [
b'\x02348X8000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648831\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
}
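# Driver torque, in STEER_TORQUE_SENSOR units, above which the driver is
# considered to be overriding the steering wheel.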
STEER_THRESHOLD = 100
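# CAN database files per platform: dbc_dict(<powertrain DBC>, <radar/ADAS DBC>).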
DBC = {
CAR.RAV4H: dbc_dict('toyota_rav4_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.RAV4: dbc_dict('toyota_rav4_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS_2019: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
CAR.COROLLA: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
CAR.COROLLA_2015: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX: dbc_dict('lexus_rx_350_2016_pt_generated', 'toyota_adas'),
CAR.LEXUS_RXH: dbc_dict('lexus_rx_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_RXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.CHR: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CHRH: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_adas'),
CAR.CAMRY: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CAMRYH: dbc_dict('toyota_camry_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.HIGHLANDER: dbc_dict('toyota_highlander_2017_pt_generated', 'toyota_adas'),
CAR.HIGHLANDER_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDERH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDERH: dbc_dict('toyota_highlander_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.AVALON: dbc_dict('toyota_avalon_2017_pt_generated', 'toyota_adas'),
CAR.RAV4_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.RAV4H_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLA_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLAH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ES_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ESH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.SIENNA: dbc_dict('toyota_sienna_xle_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_IS: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_ISH: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_CTH: dbc_dict('lexus_ct200h_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_NXH: dbc_dict('lexus_nx300h_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_UXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
}
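# Platforms without a standalone DSU (Driving Support Unit) ECU; on these cars
# the ACC/AEB functions are integrated into the radar and camera instead.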
NO_DSU_CAR = [CAR.CHR, CAR.CHRH, CAR.CAMRY, CAR.CAMRYH, CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.LEXUS_RXH_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2]
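# Platforms on Toyota Safety Sense 2.0; every TSS2 car is also a no-DSU car.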
TSS2_CAR = [CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.LEXUS_RXH_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2]
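# These platforms hold and resume from a standstill automatically; the others
# need a resume button press after coming to a stop.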
NO_STOP_TIMER_CAR = [CAR.RAV4H, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.SIENNA, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.LEXUS_RXH_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2] # no resume button press required
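
# Illustrative sketch (not part of the stock file): how the tables above are
# typically consumed once fingerprinting has picked a platform. It assumes
# dbc_dict returns {'pt': ..., 'radar': ..., 'chassis': ...}, as in
# selfdrive/car/__init__.py of this openpilot generation.
#
#   candidate = CAR.COROLLA_TSS2
#   dbc = DBC[candidate]                    # {'pt': 'toyota_nodsu_pt_generated', ...}
#   uses_dsu = candidate not in NO_DSU_CAR  # False: TSS2 cars have no DSU ECU
#   is_tss2 = candidate in TSS2_CAR         # True
#   auto_resume = candidate in NO_STOP_TIMER_CAR
#
# The firmware byte strings in FW_VERSIONS are raw UDS responses; a leading
# \x01/\x02/\x03 byte appears to count the 16-byte sub-version chunks that
# follow it, while entries without a prefix carry a single version string.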
],
CAR.RAV4H_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1696: 8, 1745: 8, 1775: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1952: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8}
],
CAR.LEXUS_NXH: [
{36: 8, 37: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 800: 8, 810: 2, 812: 3, 818: 8, 822: 8, 824: 8, 835: 8, 836: 8, 845: 5, 849: 4, 869: 7, 870: 7, 871: 2, 889: 8, 891: 8, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 913: 8, 916: 3, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 987: 8, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1006: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1168: 1, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1195: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1208: 8, 1212: 8, 1227: 8, 1228: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8}
],
CAR.LEXUS_UXH_TSS2: [
{36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 764: 8, 765: 8, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 885: 8, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1063: 8, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1575: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1800: 8, 1808: 8, 1810: 8, 1813: 8, 1814: 8, 1816: 8, 1818: 8, 1821: 8, 1822: 8, 1840: 8, 1848: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1940: 8, 1941: 8, 1945: 8, 1948: 8, 1949: 8, 1952: 8, 1953: 8, 1956: 8, 1960: 8, 1961: 8, 1964: 8, 1968: 8, 1976: 8, 1986: 8, 1990: 8, 1994: 8, 1998: 8, 2004: 8, 2012: 8, 2015: 8, 2016: 8, 2024: 8}
],
}
IGNORED_FINGERPRINTS = [CAR.LEXUS_RXH_TSS2]
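# FW version database: per car, keyed by (ECU type, diagnostic query address, sub-address);
# values are the raw firmware version strings reported by each ECU.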
FW_VERSIONS = {
CAR.AVALON: {
(Ecu.esp, 0x7b0, None): [b'F152607060\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510705200\x00\x00\x00\x00',
b'881510701300\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [b'8965B41051\x00\x00\x00\x00\x00\x00'],
(Ecu.engine, 0x7e0, None): [
b'\x0230721100\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230721200\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0701100\x00\x00\x00\x00',
b'8646F0703000\x00\x00\x00\x00',
],
},
CAR.CAMRY: {
(Ecu.engine, 0x700, None): [
b'\x018966306L3100\x00\x00\x00\x00',
b'\x018966306L4200\x00\x00\x00\x00',
b'\x018966306L5200\x00\x00\x00\x00',
b'\x018966306Q3100\x00\x00\x00\x00',
b'\x018966306Q4000\x00\x00\x00\x00',
b'\x018966306Q4100\x00\x00\x00\x00',
b'\x018966333P3100\x00\x00\x00\x00',
b'\x018966333P3200\x00\x00\x00\x00',
b'\x018966333P4200\x00\x00\x00\x00',
b'\x018966333P4300\x00\x00\x00\x00',
b'\x018966333P4400\x00\x00\x00\x00',
b'\x018966333P4500\x00\x00\x00\x00',
b'\x018966333P4700\x00\x00\x00\x00',
b'\x018966333Q6000\x00\x00\x00\x00',
b'\x018966333Q6200\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603300 ',
b'8821F0607200 ',
b'8821F0608000 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152606210\x00\x00\x00\x00\x00\x00',
b'F152606230\x00\x00\x00\x00\x00\x00',
b'F152606290\x00\x00\x00\x00\x00\x00',
b'F152633540\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603300 ',
b'8821F0607200 ',
b'8821F0608000 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0603400 ',
b'8646F0605000 ',
b'8646F0606000 ',
],
},
CAR.CAMRYH: {
(Ecu.engine, 0x700, None): [
b'\x018966333N4300\x00\x00\x00\x00',
b'\x018966333X0000\x00\x00\x00\x00',
b'\x028966306B2100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306B2300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8200\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8400\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S1100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633214\x00\x00\x00\x00\x00\x00',
b'F152633660\x00\x00\x00\x00\x00\x00',
b'F152633712\x00\x00\x00\x00\x00\x00',
b'F152633713\x00\x00\x00\x00\x00\x00',
b'F152633B51\x00\x00\x00\x00\x00\x00',
b'F152633B60\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33550\x00\x00\x00\x00\x00\x00',
b'8965B33551\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33611\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603500 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607000 ',
b'8646F0607100 ',
],
},
CAR.CHR: {
(Ecu.engine, 0x700, None): [
b'\x01896631017100\x00\x00\x00\x00',
b'\x01896631017200\x00\x00\x00\x00',
b'\x0189663F413100\x00\x00\x00\x00',
b'\x0189663F414100\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821F0W01100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152610020\x00\x00\x00\x00\x00\x00',
b'F152610153\x00\x00\x00\x00\x00\x00',
b'F1526F4034\x00\x00\x00\x00\x00\x00',
b'F1526F4044\x00\x00\x00\x00\x00\x00',
b'F1526F4073\x00\x00\x00\x00\x00\x00',
b'F1526F4122\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x033F401100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203102\x00\x00\x00\x00',
b'\x033F424000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821F0W01100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF401800 ',
b'8646FF404000 ',
b'8646FF406000 ',
],
},
CAR.CHRH: {
(Ecu.engine, 0x700, None): [
b'\x0289663F423000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F431000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0189663F438000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152610040\x00\x00\x00\x00\x00\x00',
b'F152610190\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821FF404000 ',
b'8821FF407100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10050\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821FF404000 ',
b'8821FF407100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF404000 ',
b'8646FF407000 ',
],
},
CAR.COROLLA: {
(Ecu.engine, 0x7e0, None): [
b'\x01896630E88000\x00\x00\x00\x00',
b'\x0230ZC2000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0330ZC1200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510201100\x00\x00\x00\x00',
b'881510201200\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602190\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B02181\x00\x00\x00\x00\x00\x00',
b'8965B02191\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0201101\x00\x00\x00\x00',
b'8646F0201200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.COROLLA_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZG5000\x00\x00\x00\x00',
b'\x01896630ZG5100\x00\x00\x00\x00',
b'\x01896630ZG5200\x00\x00\x00\x00',
b'\x01896630ZG5300\x00\x00\x00\x00',
b'\x01896630ZQ5000\x00\x00\x00\x00',
b'\x018966312L8000\x00\x00\x00\x00',
b'\x018966312P9000\x00\x00\x00\x00',
b'\x018966312P9100\x00\x00\x00\x00',
b'\x018966312P9200\x00\x00\x00\x00',
b'\x018966312R0100\x00\x00\x00\x00',
b'\x018966312R1000\x00\x00\x00\x00',
b'\x018966312R1100\x00\x00\x00\x00',
b'\x018966312R3100\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602191\x00\x00\x00\x00\x00\x00',
b'\x01F152602280\x00\x00\x00\x00\x00\x00',
b'\x01F152602560\x00\x00\x00\x00\x00\x00',
b'\x01F152612641\x00\x00\x00\x00\x00\x00',
b'\x01F152612651\x00\x00\x00\x00\x00\x00',
b'\x01F152612B10\x00\x00\x00\x00\x00\x00',
b'\x01F152612B60\x00\x00\x00\x00\x00\x00',
b'\x01F152612B61\x00\x00\x00\x00\x00\x00',
b'\x01F152612B90\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.COROLLAH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZJ1000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x02896630ZQ3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZR2000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q4000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966312L7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1205001\x00\x00\x00\x00',
b'\x038966312N1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896630ZN8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'8965B12451\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152612A00\x00\x00\x00\x00\x00\x00',
b'F152612590\x00\x00\x00\x00\x00\x00',
b'F152612691\x00\x00\x00\x00\x00\x00',
b'F152612692\x00\x00\x00\x00\x00\x00',
b'F152612700\x00\x00\x00\x00\x00\x00',
b'F152612800\x00\x00\x00\x00\x00\x00',
b'F152612840\x00\x00\x00\x00\x00\x00',
b'F152612A10\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152612820\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER: {
(Ecu.engine, 0x700, None): [
b'\x01896630E09000\x00\x00\x00\x00',
b'\x01896630E43100\x00\x00\x00\x00',
b'\x01896630E43200\x00\x00\x00\x00',
b'\x01896630E44200\x00\x00\x00\x00',
b'\x01896630E45000\x00\x00\x00\x00',
b'\x01896630E45100\x00\x00\x00\x00',
b'\x01896630E45200\x00\x00\x00\x00',
b'\x01896630E74000\x00\x00\x00\x00',
b'\x01896630E76000\x00\x00\x00\x00',
b'\x01896630E83000\x00\x00\x00\x00',
b'\x01896630E84000\x00\x00\x00\x00',
b'\x01896630E85000\x00\x00\x00\x00',
b'\x01896630E88000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48140\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
b'8965B48210\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F15260E011\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510E01100\x00\x00\x00\x00',
b'881510E01200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH: {
(Ecu.eps, 0x7a1, None): [
b'8965B48160\x00\x00\x00\x00\x00\x00'
],
(Ecu.esp, 0x7b0, None): [
b'F152648541\x00\x00\x00\x00\x00\x00',
b'F152648542\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230E40000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E051\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630E64100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15264872300\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x02896630E66000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F0E02100\x00\x00\x00\x00!!!!!!!!!!!!!!!!',
],
},
CAR.LEXUS_IS: {
(Ecu.engine, 0x700, None): [
b'\x018966353M7100\x00\x00\x00\x00',
b'\x018966353Q2300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F152653330\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881515306400\x00\x00\x00\x00',
b'881515306500\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [b'8965B53271\x00\x00\x00\x00\x00\x00'],
(Ecu.fwdRadar, 0x750, 0xf): [b'8821F4702300\x00\x00\x00\x00'],
(Ecu.fwdCamera, 0x750, 0x6d): [b'8646F5301400\x00\x00\x00\x00'],
},
CAR.PRIUS: {
(Ecu.engine, 0x700, None): [
b'\x02896634761000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634782000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634784000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x03896634759100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634759300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701002\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703001\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634768100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634789000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707001\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x038966347B7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47021\x00\x00\x00\x00\x00\x00',
b'8965B47022\x00\x00\x00\x00\x00\x00',
b'8965B47023\x00\x00\x00\x00\x00\x00',
b'8965B47050\x00\x00\x00\x00\x00\x00',
b'8965B47060\x00\x00\x00\x00\x00\x00', # This is the EPS with good angle sensor
],
(Ecu.esp, 0x7b0, None): [
b'F152647290\x00\x00\x00\x00\x00\x00',
b'F152647300\x00\x00\x00\x00\x00\x00',
b'F152647310\x00\x00\x00\x00\x00\x00',
b'F152647414\x00\x00\x00\x00\x00\x00',
b'F152647415\x00\x00\x00\x00\x00\x00',
b'F152647416\x00\x00\x00\x00\x00\x00',
b'F152647417\x00\x00\x00\x00\x00\x00',
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647490\x00\x00\x00\x00\x00\x00',
b'F152647684\x00\x00\x00\x00\x00\x00',
b'F152647862\x00\x00\x00\x00\x00\x00',
b'F152647863\x00\x00\x00\x00\x00\x00',
b'F152647864\x00\x00\x00\x00\x00\x00',
b'F152647865\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514702300\x00\x00\x00\x00',
b'881514703100\x00\x00\x00\x00',
b'881514704100\x00\x00\x00\x00',
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4701300\x00\x00\x00\x00',
b'8646F4702001\x00\x00\x00\x00',
b'8646F4702100\x00\x00\x00\x00',
b'8646F4702200\x00\x00\x00\x00',
b'8646F4705000\x00\x00\x00\x00',
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.PRIUS_2019: {
(Ecu.engine, 0x700, None): [
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47060\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647290\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.RAV4: {
(Ecu.engine, 0x7e0, None): [
b'\x02342Q1000\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1100\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1200\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1300\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2100\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2200\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q4000\x00\x00\x00\x00\x00\x00\x00\x0054215000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42082\x00\x00\x00\x00\x00\x00',
b'8965B42083\x00\x00\x00\x00\x00\x00',
b'8965B42063\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F15260R102\x00\x00\x00\x00\x00\x00',
b'F15260R103\x00\x00\x00\x00\x00\x00',
b'F152642493\x00\x00\x00\x00\x00\x00',
b'F152642492\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514201200\x00\x00\x00\x00',
b'881514201300\x00\x00\x00\x00',
b'881514201400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
],
},
CAR.RAV4H: {
(Ecu.engine, 0x7e0, None): [
b'\x02342N9000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342N9100\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342P0000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42103\x00\x00\x00\x00\x00\x00',
b'8965B42162\x00\x00\x00\x00\x00\x00',
b'8965B42163\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642090\x00\x00\x00\x00\x00\x00',
b'F152642120\x00\x00\x00\x00\x00\x00',
b'F152642400\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514202200\x00\x00\x00\x00',
b'881514202300\x00\x00\x00\x00',
b'881514202400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201100\x00\x00\x00\x00',
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630R58000\x00\x00\x00\x00',
b'\x018966342E2000\x00\x00\x00\x00',
b'\x018966342M8000\x00\x00\x00\x00',
b'\x018966342T1000\x00\x00\x00\x00',
b'\x018966342T6000\x00\x00\x00\x00',
b'\x018966342T9000\x00\x00\x00\x00',
b'\x018966342U4000\x00\x00\x00\x00',
b'\x018966342V3100\x00\x00\x00\x00',
b'\x018966342V3200\x00\x00\x00\x00',
b'\x018966342X5000\x00\x00\x00\x00',
b'\x01896634A05000\x00\x00\x00\x00',
b'\x01896634A19000\x00\x00\x00\x00',
b'\x01896634A19100\x00\x00\x00\x00',
b'\x01896634A20000\x00\x00\x00\x00',
b'\x01896634A22000\x00\x00\x00\x00',
b'\x01F152642551\x00\x00\x00\x00\x00\x00',
b'\x028966342T0000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342Y8000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642520\x00\x00\x00\x00\x00\x00',
b'\x01F15260R210\x00\x00\x00\x00\x00\x00',
b'\x01F15260R220\x00\x00\x00\x00\x00\x00',
b'\x01F15260R300\x00\x00\x00\x00\x00\x00',
b'\x01F152642551\x00\x00\x00\x00\x00\x00',
b'\x01F152642561\x00\x00\x00\x00\x00\x00',
b'\x01F152642700\x00\x00\x00\x00\x00\x00',
b'\x01F152642710\x00\x00\x00\x00\x00\x00',
b'\x01F152642750\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.RAV4H_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966342W8000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x018966342X6000\x00\x00\x00\x00',
b'\x028966342W4001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A23001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642541\x00\x00\x00\x00\x00\x00',
b'F152642291\x00\x00\x00\x00\x00\x00',
b'F152642330\x00\x00\x00\x00\x00\x00',
b'F152642531\x00\x00\x00\x00\x00\x00',
b'F152642532\x00\x00\x00\x00\x00\x00',
b'F152642521\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_ES_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966333T5100\x00\x00\x00\x00',
b'\x018966333T5000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'\x01F152606281\x00\x00\x00\x00\x00\x00'],
(Ecu.eps, 0x7a1, None): [b'8965B33252\x00\x00\x00\x00\x00\x00'],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F3303200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
],
},
CAR.SIENNA: {
(Ecu.engine, 0x700, None): [
b'\x01896630832100\x00\x00\x00\x00',
b'\x01896630838000\x00\x00\x00\x00',
b'\x01896630838100\x00\x00\x00\x00',
b'\x01896630842000\x00\x00\x00\x00',
b'\x01896630851000\x00\x00\x00\x00',
b'\x01896630851100\x00\x00\x00\x00',
b'\x01896630852100\x00\x00\x00\x00',
b'\x01896630859000\x00\x00\x00\x00',
b'\x01896630860000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B45070\x00\x00\x00\x00\x00\x00',
b'8965B45082\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152608130\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510801100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702200\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966333S8000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966333V4000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633423\x00\x00\x00\x00\x00\x00',
b'F152633680\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_NXH: {
(Ecu.engine, 0x7e0, None): [
b'\x0237882000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237841000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678160\x00\x00\x00\x00\x00\x00',
b'F152678170\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517804300\x00\x00\x00\x00',
b'881517804100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78100\x00\x00\x00\x00\x00\x00',
b'8965B78060\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801300\x00\x00\x00\x00',
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX: {
(Ecu.engine, 0x700, None): [
b'\x01896630E37200\x00\x00\x00\x00',
b'\x01896630E41000\x00\x00\x00\x00',
b'\x01896630E41200\x00\x00\x00\x00',
b'\x01896630E37300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648472\x00\x00\x00\x00\x00\x00',
b'F152648473\x00\x00\x00\x00\x00\x00',
b'F152648492\x00\x00\x00\x00\x00\x00',
b'F152648493\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514810300\x00\x00\x00\x00',
b'881514810500\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801100\x00\x00\x00\x00',
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH: {
(Ecu.engine, 0x7e0, None): [
b'\x02348N0000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T1100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348V6000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Z3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648361\x00\x00\x00\x00\x00\x00',
b'F152648501\x00\x00\x00\x00\x00\x00',
b'F152648502\x00\x00\x00\x00\x00\x00',
b'F152648504\x00\x00\x00\x00\x00\x00',
b'F152648A30\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514811300\x00\x00\x00\x00',
b'881514811500\x00\x00\x00\x00',
b'881514811700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EB0000\x00\x00\x00\x00',
b'\x01896630EA9000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH_TSS2: {
(Ecu.engine, 0x7e0, None): [
b'\x02348X8000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648831\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
}
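# driver steering torque above this value is treated as a steering override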
STEER_THRESHOLD = 100
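# DBC files for each car: dbc_dict(powertrain DBC, radar/ADAS DBC)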
DBC = {
CAR.RAV4H: dbc_dict('toyota_rav4_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.RAV4: dbc_dict('toyota_rav4_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS_2019: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
CAR.COROLLA: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
CAR.COROLLA_2015: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX: dbc_dict('lexus_rx_350_2016_pt_generated', 'toyota_adas'),
CAR.LEXUS_RXH: dbc_dict('lexus_rx_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_RXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.CHR: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CHRH: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_adas'),
CAR.CAMRY: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CAMRYH: dbc_dict('toyota_camry_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.HIGHLANDER: dbc_dict('toyota_highlander_2017_pt_generated', 'toyota_adas'),
CAR.HIGHLANDER_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDERH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDERH: dbc_dict('toyota_highlander_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.AVALON: dbc_dict('toyota_avalon_2017_pt_generated', 'toyota_adas'),
CAR.RAV4_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.RAV4H_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLA_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLAH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ES_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ESH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.SIENNA: dbc_dict('toyota_sienna_xle_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_IS: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_ISH: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_CTH: dbc_dict('lexus_ct200h_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_NXH: dbc_dict('lexus_nx300h_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_UXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
}
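# cars without a standalone DSU (Driving Support Unit) module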
NO_DSU_CAR = [CAR.CHR, CAR.CHRH, CAR.CAMRY, CAR.CAMRYH, CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2]
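# cars on the Toyota Safety Sense 2.0 platform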
TSS2_CAR = [CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2]
NO_STOP_TIMER_CAR = [CAR.RAV4H, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.SIENNA, CAR.RAV4H_TSS2, CAR.LEXUS_RX_TSS2, CAR.HIGHLANDER_TSS2, CAR.LEXUS_UXH_TSS2, CAR.HIGHLANDERH_TSS2] # no resume button press required
| true
| true
|
1c4990d4c73ad0f202de3c760de5c414e397c7d8
| 636
|
py
|
Python
|
paths.py
|
ChiLlx/Modified-3D-UNet-Pytorch
|
0b3bb64bbfa7c5422b2fc85d0c4eb37c0773afec
|
[
"MIT"
] | 193
|
2018-04-17T07:28:16.000Z
|
2022-03-23T00:34:43.000Z
|
paths.py
|
ChiLlx/Modified-3D-UNet-Pytorch
|
0b3bb64bbfa7c5422b2fc85d0c4eb37c0773afec
|
[
"MIT"
] | 7
|
2018-07-16T01:54:23.000Z
|
2020-12-04T06:55:02.000Z
|
paths.py
|
ChiLlx/Modified-3D-UNet-Pytorch
|
0b3bb64bbfa7c5422b2fc85d0c4eb37c0773afec
|
[
"MIT"
] | 50
|
2018-08-13T23:06:19.000Z
|
2021-12-09T09:42:13.000Z
|
raw_training_data_folder = "/media/pkao/Dataset/BraTS2018/training"
raw_validation_data_folder = "/media/pkao/Dataset/BraTS/2017/Brats17ValidationData"
raw_testing_data_folder = "/media/pkao/Dataset/BraTS/2017/Brats17TestingData"
preprocessed_training_data_folder = "/media/pkao/Dataset/DeepLearningData/BraTS_2018_train"
preprocessed_validation_data_folder = "/media/pkao/Dataset/DeepLearningData/BraTS_2017_val"
preprocessed_testing_data_folder = "/media/pkao/Dataset/DeepLearningData/datasets/BraTS_2017_test"
#results_folder = "/home/pkao/PhD/results/BraTS_2017_lasagne/" # where to save the network training and validation files
| 63.6
| 121
| 0.849057
|
raw_training_data_folder = "/media/pkao/Dataset/BraTS2018/training"
raw_validation_data_folder = "/media/pkao/Dataset/BraTS/2017/Brats17ValidationData"
raw_testing_data_folder = "/media/pkao/Dataset/BraTS/2017/Brats17TestingData"
preprocessed_training_data_folder = "/media/pkao/Dataset/DeepLearningData/BraTS_2018_train"
preprocessed_validation_data_folder = "/media/pkao/Dataset/DeepLearningData/BraTS_2017_val"
preprocessed_testing_data_folder = "/media/pkao/Dataset/DeepLearningData/datasets/BraTS_2017_test"
| true
| true
|
1c499129a87226688813507ae4e22c4b9909a7a1
| 520
|
py
|
Python
|
example/main.py
|
helioh2/pygame-universe
|
94be98072ff1644480aaaab9692c8040223c3fb1
|
[
"MIT"
] | 1
|
2018-04-04T17:55:35.000Z
|
2018-04-04T17:55:35.000Z
|
example/main.py
|
helioh2/pygame-universe
|
94be98072ff1644480aaaab9692c8040223c3fb1
|
[
"MIT"
] | null | null | null |
example/main.py
|
helioh2/pygame-universe
|
94be98072ff1644480aaaab9692c8040223c3fb1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from funcoes import *
''' ================= '''
''' Main (Big Bang):
'''
''' Jogo -> Jogo '''
''' start the world with main(JOGO_INICIAL) '''
def main(inic):
big_bang(inic, tela=TELA,
frequencia=60,
quando_tick=mover_jogo,
desenhar=desenha_jogo,
quando_tecla=trata_tecla,
quando_solta_tecla=trata_solta_tecla,
modo_debug=True,
fonte_debug=15
)
main(JOGO_INICIAL)
| 20.8
| 50
| 0.532692
|
from funcoes import *
def main(inic):
big_bang(inic, tela=TELA,
frequencia=60,
quando_tick=mover_jogo,
desenhar=desenha_jogo,
quando_tecla=trata_tecla,
quando_solta_tecla=trata_solta_tecla,
modo_debug=True,
fonte_debug=15
)
main(JOGO_INICIAL)
| true
| true
|
1c4991c37149654c613707697ef34148eef8f639
| 2,815
|
py
|
Python
|
git-auto-commit.py
|
electryone/git-auto-commit
|
66dbd02d0d4696f7f12162784aff0c97318a1a74
|
[
"MIT"
] | null | null | null |
git-auto-commit.py
|
electryone/git-auto-commit
|
66dbd02d0d4696f7f12162784aff0c97318a1a74
|
[
"MIT"
] | null | null | null |
git-auto-commit.py
|
electryone/git-auto-commit
|
66dbd02d0d4696f7f12162784aff0c97318a1a74
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/29 22:49
# @Author : Huyd
# @Site :
# @File : git-auto-commit.py
# @Software: PyCharm
import datetime
import smtplib
import subprocess
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import re
import schedule
import time
def send_mail(subject, message):
mail_host = "smtp.163.com" # 设置邮件服务器
mail_user = "electrycache1@163.com" # 用户名
mail_pass = "21897594" # 口令
sender = 'electrycache1@163.com' # 发送邮件的邮箱
receivers = 'electrycache1@163.com' # 接收邮件的邮箱,可设置为你的QQ邮箱或者其他邮箱,多个邮箱用,分隔开来
# 创建一个带附件的实例
message = MIMEText(message, 'plain', 'utf-8')
message['From'] = "electrycache1@163.com" # 邮件发送人
message['To'] = "electrycache1@163.com" # 邮件接收人
# subject = '测试监测结果' # 邮件主题
message['Subject'] = Header(subject, 'utf-8')
try:
smtpObj = smtplib.SMTP_SSL()
smtpObj.connect(mail_host, 465)  # 465 is the SMTP-over-SSL port
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, message.as_string())
print("邮件发送成功")
except smtplib.SMTPException:
print("Error: 无法发送邮件")
def job():
f = open('content.txt', 'a')
f.write(time.asctime(time.localtime(time.time())) + '\n')
f.close()  # close the log file so the line is flushed to disk
date = datetime.datetime.today().isoformat()[0:10]
#status = subprocess.run(["git", "status"])
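# run each git command with shell=False and capture combined stdout/stderr for logging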
status = subprocess.run(["git", "status"],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(status)
print('**********start git add.**********')
gadd = subprocess.run(["git", "add", "."],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gadd)
print('**********git add done.**********')
print('**********start git commit.**********')
gcom = subprocess.run(["git", "commit", "-m" + date],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gcom)
print('**********git commit done.**********')
print('**********start git push.**********')
gpush = subprocess.run(["git", "push", "origin", "master"],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gpush)
print('**********git push done.**********')
#send_mail("git a commit", str(date)) # 发送邮件
#time.sleep(61)
def main(h, m):
'''h is the scheduled hour, m the scheduled minute'''
while True:
job()
break
# check whether the scheduled time has been reached, e.g. 0:00
while True:
now = datetime.datetime.now()
print(now.hour, ' ', now.minute, ' ', now.microsecond)
# scheduled time reached: exit the inner loop
if now.hour == h and now.minute == m:
break
# not yet time: wait 20 seconds and check again
time.sleep(20)
# do the real work, once per day
#job()
print(time.asctime(time.localtime(time.time())))
main(23, 55)
| 32.356322
| 125
| 0.596803
|
import datetime
import smtplib
import subprocess
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import re
import schedule
import time
def send_mail(subject, message):
mail_host = "smtp.163.com"
mail_user = "electrycache1@163.com"
mail_pass = "21897594"
sender = 'electrycache1@163.com'
receivers = 'electrycache1@163.com'
message = MIMEText(message, 'plain', 'utf-8')
message['From'] = "electrycache1@163.com"
message['To'] = "electrycache1@163.com"
message['Subject'] = Header(subject, 'utf-8')
try:
smtpObj = smtplib.SMTP_SSL()
smtpObj.connect(mail_host, 465)
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, message.as_string())
print("邮件发送成功")
except smtplib.SMTPException:
print("Error: 无法发送邮件")
def job():
f = open('content.txt', 'a')
f.write(time.asctime(time.localtime(time.time())) + '\n')
f.close()
date = datetime.datetime.today().isoformat()[0:10]
status = subprocess.run(["git", "status"],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(status)
print('**********start git add.**********')
gadd = subprocess.run(["git", "add", "."],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gadd)
print('**********git add done.**********')
print('**********start git commit.**********')
gcom = subprocess.run(["git", "commit", "-m" + date],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gcom)
print('**********git commit done.**********')
print('**********start git push.**********')
gpush = subprocess.run(["git", "push", "origin", "master"],shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(gpush)
print('**********git push done.**********')
def main(h, m):
while True:
job()
break
while True:
now = datetime.datetime.now()
print(now.hour, ' ', now.minute, ' ', now.microsecond)
if now.hour == h and now.minute == m:
break
time.sleep(20)
print(time.asctime(time.localtime(time.time())))
main(23, 55)
| true
| true
|
1c499260da486610cee64c7a7a643e367163b5b2
| 4,052
|
py
|
Python
|
objects/CSCG/_2d/forms/standard/_2_form/base/reconstruct.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | 1
|
2020-10-14T12:48:35.000Z
|
2020-10-14T12:48:35.000Z
|
objects/CSCG/_2d/forms/standard/_2_form/base/reconstruct.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
objects/CSCG/_2d/forms/standard/_2_form/base/reconstruct.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
from screws.freeze.base import FrozenOnly
import numpy as np
class _2dCSCG_S2F_Reconstruct(FrozenOnly):
""""""
def __init__(self, f):
self._f_ = f
self._freeze_self_()
def __call__(self, xi, eta, ravel=False, i=None, vectorized=False, value_only=False):
"""
Reconstruct the standard 2-form.
Given ``xi`` and ``eta``, we reconstruct the 2-form on ``meshgrid(xi, eta)``
in all elements.
:param xi: A 1d iterable object of floats between -1 and 1.
:param eta: A 1d iterable object of floats between -1 and 1.
:param i: (`default`:``None``) Do the reconstruction for element ``#i``. If it is ``None``,
then do it for all elements.
:type i: int, None
:type xi: list, tuple, numpy.ndarray
:type eta: list, tuple, numpy.ndarray
:param bool ravel: (`default`:``False``) Whether to return the data raveled (1d).
:param vectorized:
:param value_only:
:returns: A tuple of outputs
1. (Dict[int, list]) -- :math:`x, y` coordinates.
2. (Dict[int, list]) -- Reconstructed values.
"""
f = self._f_
mesh = self._f_.mesh
xietasigma, basis = f.do.evaluate_basis_at_meshgrid(xi, eta)
#--- parse indices --------------------------------------------------
if i is None: # default, in all local mesh-elements.
INDICES = mesh.elements.indices
else:
if vectorized: vectorized = False
if isinstance(i, int):
INDICES = [i, ]
else:
raise NotImplementedError()
#---- vectorized -----------------------------------------------
if vectorized:
assert INDICES == mesh.elements.indices, f"currently, vectorized computation only works " \
f"for full reconstruction."
det_iJ = mesh.elements.coordinate_transformation.vectorized.inverse_Jacobian(*xietasigma)
if len(INDICES) > 0:
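# a metrically homogeneous mesh shares one inverse Jacobian (index 'j'); otherwise it is element-wise ('kj')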
if mesh.elements.IS.homogeneous_according_to_types_wrt_metric:
v = np.einsum('ij, ki, j -> kj', basis[0], f.cochain.array, det_iJ, optimize='greedy')
else:
v = np.einsum('ij, ki, kj -> kj', basis[0], f.cochain.array, det_iJ, optimize='greedy')
else:
v = None
if ravel:
pass
else:
raise NotImplementedError()
if value_only:
return (v,)
else:
raise Exception()
#----- non-vectorized ------------------------------------------------
else:
if value_only:
raise NotImplementedError()
else:
xyz = dict()
value = dict()
shape = [len(xi), len(eta)]
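# cache basis * det(inverse Jacobian) per metric type; metrically identical elements reuse it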
iJC = dict()
for i in INDICES:
element = mesh.elements[i]
typeWr2Metric = element.type_wrt_metric.mark
xyz[i] = element.coordinate_transformation.mapping(*xietasigma)
if typeWr2Metric in iJC:
basis_det_iJ = iJC[typeWr2Metric]
else:
det_iJ = element.coordinate_transformation.inverse_Jacobian(*xietasigma)
basis_det_iJ = basis[0] * det_iJ
if isinstance(typeWr2Metric, str):
iJC[typeWr2Metric] = basis_det_iJ
v = np.einsum('ij, i -> j', basis_det_iJ, f.cochain.local[i], optimize='greedy')
if ravel:
value[i] = [v,]
else:
# noinspection PyUnresolvedReferences
xyz[i] = [xyz[i][j].reshape(shape, order='F') for j in range(2)]
value[i] = [v.reshape(shape, order='F'),]
return xyz, value
| 38.961538
| 107
| 0.488648
|
from screws.freeze.base import FrozenOnly
import numpy as np
class _2dCSCG_S2F_Reconstruct(FrozenOnly):
def __init__(self, f):
self._f_ = f
self._freeze_self_()
def __call__(self, xi, eta, ravel=False, i=None, vectorized=False, value_only=False):
f = self._f_
mesh = self._f_.mesh
xietasigma, basis = f.do.evaluate_basis_at_meshgrid(xi, eta)
if i is None:
INDICES = mesh.elements.indices
else:
if vectorized: vectorized = False
if isinstance(i, int):
INDICES = [i, ]
else:
raise NotImplementedError()
if vectorized:
assert INDICES == mesh.elements.indices, f"currently, vectorized computation only works " \
f"for full reconstruction."
det_iJ = mesh.elements.coordinate_transformation.vectorized.inverse_Jacobian(*xietasigma)
if len(INDICES) > 0:
if mesh.elements.IS.homogeneous_according_to_types_wrt_metric:
v = np.einsum('ij, ki, j -> kj', basis[0], f.cochain.array, det_iJ, optimize='greedy')
else:
v = np.einsum('ij, ki, kj -> kj', basis[0], f.cochain.array, det_iJ, optimize='greedy')
else:
v = None
if ravel:
pass
else:
raise NotImplementedError()
if value_only:
return (v,)
else:
raise Exception()
else:
if value_only:
raise NotImplementedError()
else:
xyz = dict()
value = dict()
shape = [len(xi), len(eta)]
iJC = dict()
for i in INDICES:
element = mesh.elements[i]
typeWr2Metric = element.type_wrt_metric.mark
xyz[i] = element.coordinate_transformation.mapping(*xietasigma)
if typeWr2Metric in iJC:
basis_det_iJ = iJC[typeWr2Metric]
else:
det_iJ = element.coordinate_transformation.inverse_Jacobian(*xietasigma)
basis_det_iJ = basis[0] * det_iJ
if isinstance(typeWr2Metric, str):
iJC[typeWr2Metric] = basis_det_iJ
v = np.einsum('ij, i -> j', basis_det_iJ, f.cochain.local[i], optimize='greedy')
if ravel:
value[i] = [v,]
else:
xyz[i] = [xyz[i][j].reshape(shape, order='F') for j in range(2)]
value[i] = [v.reshape(shape, order='F'),]
return xyz, value
| true
| true
|
1c4992765c22841944c3a0022d4163faee833f72
| 750
|
py
|
Python
|
scheduleSynchronizer/urls.py
|
497022407/Shifts-manager
|
beccb63c8622c015a9a453f586d4c3bb5d5066b9
|
[
"Apache-2.0"
] | null | null | null |
scheduleSynchronizer/urls.py
|
497022407/Shifts-manager
|
beccb63c8622c015a9a453f586d4c3bb5d5066b9
|
[
"Apache-2.0"
] | null | null | null |
scheduleSynchronizer/urls.py
|
497022407/Shifts-manager
|
beccb63c8622c015a9a453f586d4c3bb5d5066b9
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
# /
path('', views.home, name='home'),
path('guide', views.guide, name='guide'),
# TEMPORARY
path('signin', views.sign_in, name='signin'),
path('signout', views.sign_out, name='signout'),
path('calendar', views.calendar, name='calendar'),
path('shift', views.shift, name='shift'),
path('delete_incorrect_shifts', views.delete_incorrect_shifts,
name='delete_incorrect_shifts'),
path('search_function', views.search_function, name='search_function'),
path('delete_by_id', views.delete_by_id, name='delete_by_id'),
path('callback', views.callback, name='callback'),
path('calendar/new', views.newevent, name='newevent'),
]
| 32.608696
| 75
| 0.674667
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('guide', views.guide, name='guide'),
path('signin', views.sign_in, name='signin'),
path('signout', views.sign_out, name='signout'),
path('calendar', views.calendar, name='calendar'),
path('shift', views.shift, name='shift'),
path('delete_incorrect_shifts', views.delete_incorrect_shifts,
name='delete_incorrect_shifts'),
path('search_function', views.search_function, name='search_function'),
path('delete_by_id', views.delete_by_id, name='delete_by_id'),
path('callback', views.callback, name='callback'),
path('calendar/new', views.newevent, name='newevent'),
]
| true
| true
|
1c49941779b1860c8f46a3c4c6efc4ea5ed1d14a
| 374
|
py
|
Python
|
onlinecourse/migrations/0002_auto_20220120_0050.py
|
jalvaradoWD/final-cloud-app-with-database
|
9d3f814b68f24343b48c336dd4464764e805f0a5
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/migrations/0002_auto_20220120_0050.py
|
jalvaradoWD/final-cloud-app-with-database
|
9d3f814b68f24343b48c336dd4464764e805f0a5
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/migrations/0002_auto_20220120_0050.py
|
jalvaradoWD/final-cloud-app-with-database
|
9d3f814b68f24343b48c336dd4464764e805f0a5
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.3 on 2022-01-20 00:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('onlinecourse', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='choice',
name='is_correct',
field=models.BooleanField(),
),
]
| 19.684211
| 47
| 0.588235
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('onlinecourse', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='choice',
name='is_correct',
field=models.BooleanField(),
),
]
| true
| true
|
1c49946e54bff8f5ded1a44c434d7ff67635335a
| 7,120
|
py
|
Python
|
veriloggen/types/ram.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
veriloggen/types/ram.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
veriloggen/types/ram.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import copy
import veriloggen.core.vtypes as vtypes
from veriloggen.core.module import Module
from . import util
def mkRAMDefinition(name, datawidth=32, addrwidth=10, numports=2,
initvals=None, sync=True, with_enable=False,
nocheck_initvals=False, ram_style=None):
m = Module(name)
clk = m.Input('CLK')
interfaces = []
for i in range(numports):
interface = RAMSlaveInterface(
m, name + '_%d' % i, datawidth, addrwidth, with_enable=with_enable)
if sync:
interface.delay_addr = m.Reg(name + '_%d_daddr' % i, addrwidth)
interfaces.append(interface)
if ram_style is not None:
m.EmbeddedCode(ram_style)
mem = m.Reg('mem', datawidth, 2**addrwidth)
if initvals is not None:
if not isinstance(initvals, (tuple, list)):
raise TypeError("initvals must be tuple or list, not '%s" %
str(type(initvals)))
base = 16
if not nocheck_initvals:
new_initvals = []
for initval in initvals:
if isinstance(initval, int):
new_initvals.append(
vtypes.Int(initval, datawidth, base=16))
elif isinstance(initval, vtypes.Int) and isinstance(initval.value, int):
v = copy.deepcopy(initval)
v.width = datawidth
v.base = base
new_initvals.append(v)
elif isinstance(initval, vtypes.Int) and isinstance(initval.value, str):
v = copy.deepcopy(initval)
v.width = datawidth
if v.base != 2 and v.base != 16:
raise ValueError('base must be 2 or 16')
base = v.base
new_initvals.append(v)
else:
raise TypeError("values of initvals must be int, not '%s" %
str(type(initval)))
initvals = new_initvals
if 2 ** addrwidth > len(initvals):
initvals.extend(
[vtypes.Int(0, datawidth, base=base)
for _ in range(2 ** addrwidth - len(initvals))])
m.Initial(
*[mem[i](initval) for i, initval in enumerate(initvals)]
)
for interface in interfaces:
body = [
vtypes.If(interface.wenable)(
mem[interface.addr](interface.wdata)
)]
if sync:
body.append(interface.delay_addr(interface.addr))
if with_enable:
body = vtypes.If(interface.enable)(*body)
m.Always(vtypes.Posedge(clk))(
body
)
if sync:
m.Assign(interface.rdata(mem[interface.delay_addr]))
else:
m.Assign(interface.rdata(mem[interface.addr]))
return m
class RAMInterface(object):
_I = 'Reg'
_O = 'Wire'
def __init__(self, m, name=None, datawidth=32, addrwidth=10,
itype=None, otype=None,
p_addr='addr', p_rdata='rdata',
p_wdata='wdata', p_wenable='wenable',
p_enable='enable',
with_enable=False, index=None):
if itype is None:
itype = self._I
if otype is None:
otype = self._O
self.m = m
name_addr = p_addr if name is None else '_'.join([name, p_addr])
name_rdata = p_rdata if name is None else '_'.join([name, p_rdata])
name_wdata = p_wdata if name is None else '_'.join([name, p_wdata])
name_wenable = (
p_wenable if name is None else '_'.join([name, p_wenable]))
if with_enable:
name_enable = (
p_enable if name is None else '_'.join([name, p_enable]))
if index is not None:
name_addr = name_addr + str(index)
name_rdata = name_rdata + str(index)
name_wdata = name_wdata + str(index)
name_wenable = name_wenable + str(index)
if with_enable:
name_enable = name_enable + str(index)
self.addr = util.make_port(m, itype, name_addr, addrwidth, initval=0)
self.rdata = util.make_port(m, otype, name_rdata, datawidth, initval=0)
self.wdata = util.make_port(m, itype, name_wdata, datawidth, initval=0)
self.wenable = util.make_port(m, itype, name_wenable, initval=0)
if with_enable:
self.enable = util.make_port(m, itype, name_enable, initval=0)
def connect(self, targ):
self.addr.connect(targ.addr)
targ.rdata.connect(self.rdata)
self.wdata.connect(targ.wdata)
self.wenable.connect(targ.wenable)
if hasattr(self, 'enable'):
if hasattr(targ, 'enable'):
self.enable.connect(targ.enable)
else:
self.enable.connect(1)
else:
if hasattr(targ, 'enable'):
raise ValueError('no enable port')
class RAMSlaveInterface(RAMInterface):
_I = 'Input'
_O = 'Output'
class RAMMasterInterface(RAMInterface):
_I = 'Output'
_O = 'Input'
class _RAM_RTL(object):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, sync=True, with_enable=False):
self.m = m
self.name = name
self.clk = clk
self.with_enable = with_enable
self.interfaces = [RAMInterface(m, name + '_%d' % i, datawidth, addrwidth,
itype='Wire', otype='Wire', with_enable=with_enable)
for i in range(numports)]
ram_def = mkRAMDefinition(name, datawidth, addrwidth, numports,
initvals, sync, with_enable)
self.m.Instance(ram_def, name,
params=(), ports=m.connect_ports(ram_def))
def connect(self, port, addr, wdata, wenable, enable=None):
self.m.Assign(self.interfaces[port].addr(addr))
self.m.Assign(self.interfaces[port].wdata(wdata))
self.m.Assign(self.interfaces[port].wenable(wenable))
if self.with_enable:
self.m.Assign(self.interfaces[port].enable(enable))
def rdata(self, port):
return self.interfaces[port].rdata
class SyncRAM(_RAM_RTL):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, with_enable=False):
_RAM_RTL.__init__(self, m, name, clk,
datawidth, addrwidth, numports,
initvals, sync=True, with_enable=with_enable)
class AsyncRAM(_RAM_RTL):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, with_enable=False):
_RAM_RTL.__init__(self, m, name, clk,
datawidth, addrwidth, numports,
initvals, sync=False)
| 33.584906
| 92
| 0.556039
|
from __future__ import absolute_import
from __future__ import print_function
import copy
import veriloggen.core.vtypes as vtypes
from veriloggen.core.module import Module
from . import util
def mkRAMDefinition(name, datawidth=32, addrwidth=10, numports=2,
initvals=None, sync=True, with_enable=False,
nocheck_initvals=False, ram_style=None):
m = Module(name)
clk = m.Input('CLK')
interfaces = []
for i in range(numports):
interface = RAMSlaveInterface(
m, name + '_%d' % i, datawidth, addrwidth, with_enable=with_enable)
if sync:
interface.delay_addr = m.Reg(name + '_%d_daddr' % i, addrwidth)
interfaces.append(interface)
if ram_style is not None:
m.EmbeddedCode(ram_style)
mem = m.Reg('mem', datawidth, 2**addrwidth)
if initvals is not None:
if not isinstance(initvals, (tuple, list)):
raise TypeError("initvals must be tuple or list, not '%s" %
str(type(initvals)))
base = 16
if not nocheck_initvals:
new_initvals = []
for initval in initvals:
if isinstance(initval, int):
new_initvals.append(
vtypes.Int(initval, datawidth, base=16))
elif isinstance(initval, vtypes.Int) and isinstance(initval.value, int):
v = copy.deepcopy(initval)
v.width = datawidth
v.base = base
new_initvals.append(v)
elif isinstance(initval, vtypes.Int) and isinstance(initval.value, str):
v = copy.deepcopy(initval)
v.width = datawidth
if v.base != 2 and v.base != 16:
raise ValueError('base must be 2 or 16')
base = v.base
new_initvals.append(v)
else:
raise TypeError("values of initvals must be int, not '%s" %
str(type(initval)))
initvals = new_initvals
if 2 ** addrwidth > len(initvals):
initvals.extend(
[vtypes.Int(0, datawidth, base=base)
for _ in range(2 ** addrwidth - len(initvals))])
m.Initial(
*[mem[i](initval) for i, initval in enumerate(initvals)]
)
for interface in interfaces:
body = [
vtypes.If(interface.wenable)(
mem[interface.addr](interface.wdata)
)]
if sync:
body.append(interface.delay_addr(interface.addr))
if with_enable:
body = vtypes.If(interface.enable)(*body)
m.Always(vtypes.Posedge(clk))(
body
)
if sync:
m.Assign(interface.rdata(mem[interface.delay_addr]))
else:
m.Assign(interface.rdata(mem[interface.addr]))
return m
class RAMInterface(object):
_I = 'Reg'
_O = 'Wire'
def __init__(self, m, name=None, datawidth=32, addrwidth=10,
itype=None, otype=None,
p_addr='addr', p_rdata='rdata',
p_wdata='wdata', p_wenable='wenable',
p_enable='enable',
with_enable=False, index=None):
if itype is None:
itype = self._I
if otype is None:
otype = self._O
self.m = m
name_addr = p_addr if name is None else '_'.join([name, p_addr])
name_rdata = p_rdata if name is None else '_'.join([name, p_rdata])
name_wdata = p_wdata if name is None else '_'.join([name, p_wdata])
name_wenable = (
p_wenable if name is None else '_'.join([name, p_wenable]))
if with_enable:
name_enable = (
p_enable if name is None else '_'.join([name, p_enable]))
if index is not None:
name_addr = name_addr + str(index)
name_rdata = name_rdata + str(index)
name_wdata = name_wdata + str(index)
name_wenable = name_wenable + str(index)
if with_enable:
name_enable = name_enable + str(index)
self.addr = util.make_port(m, itype, name_addr, addrwidth, initval=0)
self.rdata = util.make_port(m, otype, name_rdata, datawidth, initval=0)
self.wdata = util.make_port(m, itype, name_wdata, datawidth, initval=0)
self.wenable = util.make_port(m, itype, name_wenable, initval=0)
if with_enable:
self.enable = util.make_port(m, itype, name_enable, initval=0)
def connect(self, targ):
self.addr.connect(targ.addr)
targ.rdata.connect(self.rdata)
self.wdata.connect(targ.wdata)
self.wenable.connect(targ.wenable)
if hasattr(self, 'enable'):
if hasattr(targ, 'enable'):
self.enable.connect(targ.enable)
else:
self.enable.connect(1)
else:
if hasattr(targ, 'enable'):
raise ValueError('no enable port')
class RAMSlaveInterface(RAMInterface):
_I = 'Input'
_O = 'Output'
class RAMMasterInterface(RAMInterface):
_I = 'Output'
_O = 'Input'
class _RAM_RTL(object):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, sync=True, with_enable=False):
self.m = m
self.name = name
self.clk = clk
self.with_enable = with_enable
self.interfaces = [RAMInterface(m, name + '_%d' % i, datawidth, addrwidth,
itype='Wire', otype='Wire', with_enable=with_enable)
for i in range(numports)]
ram_def = mkRAMDefinition(name, datawidth, addrwidth, numports,
initvals, sync, with_enable)
self.m.Instance(ram_def, name,
params=(), ports=m.connect_ports(ram_def))
def connect(self, port, addr, wdata, wenable, enable=None):
self.m.Assign(self.interfaces[port].addr(addr))
self.m.Assign(self.interfaces[port].wdata(wdata))
self.m.Assign(self.interfaces[port].wenable(wenable))
if self.with_enable:
self.m.Assign(self.interfaces[port].enable(enable))
def rdata(self, port):
return self.interfaces[port].rdata
class SyncRAM(_RAM_RTL):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, with_enable=False):
_RAM_RTL.__init__(self, m, name, clk,
datawidth, addrwidth, numports,
initvals, sync=True, with_enable=with_enable)
class AsyncRAM(_RAM_RTL):
def __init__(self, m, name, clk,
datawidth=32, addrwidth=10, numports=1,
initvals=None, with_enable=False):
_RAM_RTL.__init__(self, m, name, clk,
datawidth, addrwidth, numports,
initvals, sync=False)
| true
| true
|
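For orientation, here is a hedged usage sketch for the module above: it builds a small two-port synchronous RAM definition and emits Verilog. The parameter values are illustrative, and to_verilog() is assumed to be the usual veriloggen emission call on a Module.

from veriloggen.types.ram import mkRAMDefinition

ram = mkRAMDefinition('my_ram', datawidth=8, addrwidth=4, numports=2,
                      initvals=None, sync=True)
print(ram.to_verilog())  # emit the generated Verilog source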
1c499523c6731619a57785345e99096f3cd43458
| 3,402
|
py
|
Python
|
tapiriik/services/ratelimiting.py
|
Decathlon/exercisync
|
e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef
|
[
"Apache-2.0"
] | 11
|
2019-08-05T15:38:25.000Z
|
2022-03-12T09:50:02.000Z
|
tapiriik/services/ratelimiting.py
|
Decathlon/exercisync
|
e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef
|
[
"Apache-2.0"
] | 31
|
2019-03-05T20:38:11.000Z
|
2022-03-21T09:41:23.000Z
|
tapiriik/services/ratelimiting.py
|
Decathlon/exercisync
|
e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef
|
[
"Apache-2.0"
] | 8
|
2019-03-05T08:20:07.000Z
|
2021-08-18T08:20:17.000Z
|
from tapiriik.database import ratelimit as rl_db, redis
from tapiriik.settings import _GLOBAL_LOGGER
from pymongo.read_preferences import ReadPreference
from datetime import datetime, timedelta
import math
import logging
class RateLimitExceededException(Exception):
pass
class RateLimit:
def Limit(key):
current_limits = rl_db.limits.find({"Key": key}, {"Max": 1, "Count": 1})
for limit in current_limits:
if limit["Max"] < limit["Count"]:
# We can't continue without exceeding this limit
# Don't want to halt the synchronization worker to wait for 15min-1 hour
# So...
raise RateLimitExceededException()
_GLOBAL_LOGGER.info("Adding 1 to count")
rl_db.limits.update_many({"Key": key}, {"$inc": {"Count": 1}})
def Refresh(key, limits):
# Limits is in format [(timespan, max-count),...]
# The windows are anchored at midnight
# The timespan is used to uniquely identify limit instances between runs
midnight = datetime.combine(datetime.utcnow().date(), datetime.min.time())
time_since_midnight = (datetime.utcnow() - midnight)
rl_db.limits.delete_many({"Key": key, "Expires": {"$lt": datetime.utcnow()}})
current_limits = list(rl_db.limits.with_options(read_preference=ReadPreference.PRIMARY).find({"Key": key}, {"Duration": 1}))
missing_limits = [x for x in limits if x[0].total_seconds() not in [limit["Duration"] for limit in current_limits]]
for limit in missing_limits:
window_start = midnight + timedelta(seconds=math.floor(time_since_midnight.total_seconds()/limit[0].total_seconds()) * limit[0].total_seconds())
window_end = window_start + limit[0]
rl_db.limits.insert({"Key": key, "Count": 0, "Duration": limit[0].total_seconds(), "Max": limit[1], "Expires": window_end})
class RedisRateLimit:
def IsOneRateLimitReached(rate_limited_services):
for svc in rate_limited_services:
for limit in svc.GlobalRateLimits:
limit_timedelta_seconds = int(limit[0].total_seconds())
limit_number = limit[1]
limit_key = svc.ID+":lm:"+str(limit_timedelta_seconds)
actual_limit = redis.get(limit_key)
                if actual_limit is not None:
if int(actual_limit.decode('utf-8')) >= (limit_number * 0.95):
return True
return False
def Limit(key, limits):
for limit in limits:
limit_timedelta_seconds = int(limit[0].total_seconds())
limit_number = limit[1]
limit_key = key+":lm:"+str(limit_timedelta_seconds)
            # Increase the key by one.
            # If it does not exist or has expired, it will be set to one.
            # The Redis INCR command is atomic and "SHOULD" not create a race condition.
            actual_rl = redis.incr(limit_key)
            # The key's expiry time is determined by:
            # - taking now as a UNIX epoch timestamp, floor-divided by limit_timedelta_seconds
            # - adding one to simulate a ceiling division
            # - multiplying back by limit_timedelta_seconds to land on a UNIX epoch timestamp
            redis.expireat(limit_key, ((int(datetime.now().strftime('%s')) // limit_timedelta_seconds)+1) * limit_timedelta_seconds)
            # Well, here we might lose 1 API call, but this is a safety margin in case an unexpected race condition happens
            # Better safe than sorry :)
if actual_rl >= limit_number-1:
raise RateLimitExceededException("Actual rate limit : %s / Max rate limit : %s" % (actual_rl, limit_number))
_GLOBAL_LOGGER.info("Adding 1 to %s %s limit count. It is now %s/%s" % (key, limit_number, actual_rl, limit_number))
| 46.60274
| 147
| 0.726925
|
from tapiriik.database import ratelimit as rl_db, redis
from tapiriik.settings import _GLOBAL_LOGGER
from pymongo.read_preferences import ReadPreference
from datetime import datetime, timedelta
import math
import logging
class RateLimitExceededException(Exception):
pass
class RateLimit:
def Limit(key):
current_limits = rl_db.limits.find({"Key": key}, {"Max": 1, "Count": 1})
for limit in current_limits:
if limit["Max"] < limit["Count"]:
# Don't want to halt the synchronization worker to wait for 15min-1 hour
raise RateLimitExceededException()
_GLOBAL_LOGGER.info("Adding 1 to count")
rl_db.limits.update_many({"Key": key}, {"$inc": {"Count": 1}})
def Refresh(key, limits):
midnight = datetime.combine(datetime.utcnow().date(), datetime.min.time())
time_since_midnight = (datetime.utcnow() - midnight)
rl_db.limits.delete_many({"Key": key, "Expires": {"$lt": datetime.utcnow()}})
current_limits = list(rl_db.limits.with_options(read_preference=ReadPreference.PRIMARY).find({"Key": key}, {"Duration": 1}))
missing_limits = [x for x in limits if x[0].total_seconds() not in [limit["Duration"] for limit in current_limits]]
for limit in missing_limits:
window_start = midnight + timedelta(seconds=math.floor(time_since_midnight.total_seconds()/limit[0].total_seconds()) * limit[0].total_seconds())
window_end = window_start + limit[0]
rl_db.limits.insert({"Key": key, "Count": 0, "Duration": limit[0].total_seconds(), "Max": limit[1], "Expires": window_end})
class RedisRateLimit:
def IsOneRateLimitReached(rate_limited_services):
for svc in rate_limited_services:
for limit in svc.GlobalRateLimits:
limit_timedelta_seconds = int(limit[0].total_seconds())
limit_number = limit[1]
limit_key = svc.ID+":lm:"+str(limit_timedelta_seconds)
actual_limit = redis.get(limit_key)
                if actual_limit is not None:
if int(actual_limit.decode('utf-8')) >= (limit_number * 0.95):
return True
return False
def Limit(key, limits):
for limit in limits:
limit_timedelta_seconds = int(limit[0].total_seconds())
limit_number = limit[1]
limit_key = key+":lm:"+str(limit_timedelta_seconds)
actual_rl = redis.incr(limit_key)
redis.expireat(limit_key, ((int(datetime.now().strftime('%s')) // limit_timedelta_seconds)+1) * limit_timedelta_seconds)
if actual_rl >= limit_number-1:
raise RateLimitExceededException("Actual rate limit : %s / Max rate limit : %s" % (actual_rl, limit_number))
_GLOBAL_LOGGER.info("Adding 1 to %s %s limit count. It is now %s/%s" % (key, limit_number, actual_rl, limit_number))
| true
| true
|
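A standalone illustration (independent of tapiriik) of the fixed-window arithmetic RedisRateLimit.Limit uses above: floor-divide the current epoch by the window length, add one, and multiply back to get the timestamp at which the counter key should expire.

window_seconds = 900          # e.g. a 15-minute rate-limit window
now = 1_700_000_123           # a sample UNIX timestamp
window_end = ((now // window_seconds) + 1) * window_seconds
# The key always expires at the end of the current window, never later.
assert 0 < window_end - now <= window_seconds
print(window_end)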
1c4995509deb8fbfcad4283c6a2e9e2fcf5fef57
| 13
|
py
|
Python
|
ProximityScore/ProximityScore.py
|
IndyMPO/IndyGeoprocessingTools
|
968f9befc37252e065e8d8085c0d10f17a871152
|
[
"Apache-2.0"
] | null | null | null |
ProximityScore/ProximityScore.py
|
IndyMPO/IndyGeoprocessingTools
|
968f9befc37252e065e8d8085c0d10f17a871152
|
[
"Apache-2.0"
] | 3
|
2016-08-30T16:10:20.000Z
|
2016-09-06T15:32:44.000Z
|
ProximityScore/ProximityScore.py
|
IndyMPO/IndyGeoprocessingTools
|
968f9befc37252e065e8d8085c0d10f17a871152
|
[
"Apache-2.0"
] | null | null | null |
import arcpy
| 6.5
| 12
| 0.846154
|
import arcpy
| true
| true
|
1c49969fcd2408de0311767f86ec53448a5425dc
| 80
|
py
|
Python
|
grokproject/__init__.py
|
zopefoundation/grokproject
|
78d00bded86dbc1cf8ed2f561c8221eda7e68e7a
|
[
"ZPL-2.1"
] | 4
|
2015-12-05T05:47:56.000Z
|
2017-08-22T13:45:02.000Z
|
grokproject/__init__.py
|
zopefoundation/grokproject
|
78d00bded86dbc1cf8ed2f561c8221eda7e68e7a
|
[
"ZPL-2.1"
] | 12
|
2015-12-03T11:58:01.000Z
|
2018-01-23T13:29:25.000Z
|
grokproject/__init__.py
|
zopefoundation/grokproject
|
78d00bded86dbc1cf8ed2f561c8221eda7e68e7a
|
[
"ZPL-2.1"
] | 5
|
2016-03-21T10:23:36.000Z
|
2020-09-27T02:47:31.000Z
|
from grokproject.templates import GrokProject
from grokproject.main import main
| 26.666667
| 45
| 0.875
|
from grokproject.templates import GrokProject
from grokproject.main import main
| true
| true
|
1c4996ec7c2dfcc43f4cb7feb849a3a3828a477f
| 858
|
py
|
Python
|
Virtualenv/Env/src/GoTravel/Contact/migrations/0001_initial.py
|
Anoop01234/Go-Travel
|
aa91f1a4ce7e7ed78de8eadc55e6a25d1a73bdd8
|
[
"MIT"
] | null | null | null |
Virtualenv/Env/src/GoTravel/Contact/migrations/0001_initial.py
|
Anoop01234/Go-Travel
|
aa91f1a4ce7e7ed78de8eadc55e6a25d1a73bdd8
|
[
"MIT"
] | null | null | null |
Virtualenv/Env/src/GoTravel/Contact/migrations/0001_initial.py
|
Anoop01234/Go-Travel
|
aa91f1a4ce7e7ed78de8eadc55e6a25d1a73bdd8
|
[
"MIT"
] | 1
|
2021-12-21T17:27:34.000Z
|
2021-12-21T17:27:34.000Z
|
# Generated by Django 3.0.7 on 2020-06-23 02:13
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstname', models.CharField(max_length=50)),
('lastname', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=50)),
('message', models.TextField()),
],
options={
'verbose_name': 'Contact',
'verbose_name_plural': 'Contacts',
},
),
]
| 28.6
| 114
| 0.538462
|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstname', models.CharField(max_length=50)),
('lastname', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=50)),
('message', models.TextField()),
],
options={
'verbose_name': 'Contact',
'verbose_name_plural': 'Contacts',
},
),
]
| true
| true
|
1c4997d7ab42517d38d4ecafb5aa2ac189d46bdc
| 1,885
|
py
|
Python
|
inbm/cloudadapter-agent/tests/unit/test_cloudadapter.py
|
ahameedx/intel-inb-manageability
|
aca445fa4cef0b608e6e88e74476547e10c06073
|
[
"Apache-2.0"
] | 5
|
2021-12-13T21:19:31.000Z
|
2022-01-18T18:29:43.000Z
|
inbm/cloudadapter-agent/tests/unit/test_cloudadapter.py
|
ahameedx/intel-inb-manageability
|
aca445fa4cef0b608e6e88e74476547e10c06073
|
[
"Apache-2.0"
] | 45
|
2021-12-30T17:21:09.000Z
|
2022-03-29T22:47:32.000Z
|
inbm/cloudadapter-agent/tests/unit/test_cloudadapter.py
|
ahameedx/intel-inb-manageability
|
aca445fa4cef0b608e6e88e74476547e10c06073
|
[
"Apache-2.0"
] | 4
|
2022-01-26T17:42:54.000Z
|
2022-03-30T04:48:04.000Z
|
"""
Unit tests for the cloudadapter file
"""
import unittest
import mock
import sys
import cloudadapter.cloudadapter as cloudadapter
from cloudadapter.cloudadapter import CloudAdapter
from cloudadapter.exceptions import BadConfigError
class TestCloudAdapter(unittest.TestCase):
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_cloudadapter_starts_client_succeeds(self, MockClient, MockWaiter, mock_fileConfig):
cloudadapter.main()
assert MockClient.return_value.start.call_count == 1
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.logging', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_cloudadapter_logs_and_exits_client_error_succeeds(
self, MockClient, MockWaiter, mock_logging, mock_fileConfig):
MockClient.side_effect = BadConfigError("Error!")
mock_logger = mock_logging.getLogger.return_value
cloudadapter.main()
if sys.version_info >= (3, 6):
assert mock_logger.error.call_count == 1
else:
assert mock_logger.error.call_count == 2
assert MockClient.return_value.start.call_count == 0
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_service_name_prefixed_inbm(self, MockClient, MockWaiter, mock_fileConfig):
ca = CloudAdapter()
self.assertFalse(' ' in ca._svc_name_)
        self.assertEqual(ca._svc_name_.split('-')[0], 'inbm')
| 39.270833
| 96
| 0.741114
|
import unittest
import mock
import sys
import cloudadapter.cloudadapter as cloudadapter
from cloudadapter.cloudadapter import CloudAdapter
from cloudadapter.exceptions import BadConfigError
class TestCloudAdapter(unittest.TestCase):
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_cloudadapter_starts_client_succeeds(self, MockClient, MockWaiter, mock_fileConfig):
cloudadapter.main()
assert MockClient.return_value.start.call_count == 1
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.logging', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_cloudadapter_logs_and_exits_client_error_succeeds(
self, MockClient, MockWaiter, mock_logging, mock_fileConfig):
MockClient.side_effect = BadConfigError("Error!")
mock_logger = mock_logging.getLogger.return_value
cloudadapter.main()
if sys.version_info >= (3, 6):
assert mock_logger.error.call_count == 1
else:
assert mock_logger.error.call_count == 2
assert MockClient.return_value.start.call_count == 0
@mock.patch('cloudadapter.cloudadapter.fileConfig', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Waiter', autospec=True)
@mock.patch('cloudadapter.cloudadapter.Client', autospec=True)
def test_service_name_prefixed_inbm(self, MockClient, MockWaiter, mock_fileConfig):
ca = CloudAdapter()
self.assertFalse(' ' in ca._svc_name_)
        self.assertEqual(ca._svc_name_.split('-')[0], 'inbm')
| true
| true
|
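The tests above patch each collaborator with autospec=True so the mocks enforce the real call signatures. A minimal, self-contained sketch of the same pattern (the Client and run names here are illustrative, not from inbm; patching '__main__.Client' assumes the snippet runs as a script):

from unittest import mock

class Client:
    def start(self):
        pass

def run():
    Client().start()  # Client is looked up at call time, so patching takes effect

with mock.patch('__main__.Client', autospec=True) as MockClient:
    run()
    assert MockClient.return_value.start.call_count == 1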
1c4997ef0d7b724a972be742252a3257c5688768
| 660
|
py
|
Python
|
videochat/pyserver/manage.py
|
GenBInc/quickhellou
|
fb97f995904a8397c631a7256f86905c5b16a7c0
|
[
"MIT"
] | 1
|
2022-03-31T13:18:41.000Z
|
2022-03-31T13:18:41.000Z
|
videochat/pyserver/manage.py
|
GenBInc/quickhellou
|
fb97f995904a8397c631a7256f86905c5b16a7c0
|
[
"MIT"
] | null | null | null |
videochat/pyserver/manage.py
|
GenBInc/quickhellou
|
fb97f995904a8397c631a7256f86905c5b16a7c0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'qhv2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.695652
| 73
| 0.677273
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'qhv2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|
1c4998e95581100279df8ab9274b91042ee7fc13
| 479
|
py
|
Python
|
software/old_stuff/lm75.py
|
84ace/esp32_smart_keezer
|
48f13ab377de82d3eea2c7a769ff3f82ff48fdd9
|
[
"MIT"
] | 1
|
2022-01-27T21:30:10.000Z
|
2022-01-27T21:30:10.000Z
|
software/old_stuff/lm75.py
|
84ace/esp32_smart_keezer
|
48f13ab377de82d3eea2c7a769ff3f82ff48fdd9
|
[
"MIT"
] | null | null | null |
software/old_stuff/lm75.py
|
84ace/esp32_smart_keezer
|
48f13ab377de82d3eea2c7a769ff3f82ff48fdd9
|
[
"MIT"
] | null | null | null |
from math import floor


class LM75(object):
    ADDRESS = 0x48  # LM75 bus address
    FREQUENCY = 100000  # I2C bus frequency

    def __init__(self, i2c):
        # An initialized machine.I2C bus must be supplied by the caller.
        self.i2c = i2c
def get_output(self):
"""Return raw output from the LM75 sensor."""
output = self.i2c.readfrom(self.ADDRESS, 2)
return output[0], output[1]
def get_temp(self):
"""Return a tuple of (temp_c, point)."""
temp = self.get_output()
return int(temp[0]), floor(int(temp[1]) / 23)
| 28.176471
| 53
| 0.580376
|
from math import floor


class LM75(object):
    ADDRESS = 0x48
    FREQUENCY = 100000

    def __init__(self, i2c):
        self.i2c = i2c
def get_output(self):
output = self.i2c.readfrom(self.ADDRESS, 2)
return output[0], output[1]
def get_temp(self):
temp = self.get_output()
return int(temp[0]), floor(int(temp[1]) / 23)
| true
| true
|
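A hedged MicroPython usage sketch for the class above, assuming an ESP32-style machine.I2C bus; the bus id and pin numbers are illustrative and depend on the board wiring.

from machine import I2C, Pin

i2c = I2C(0, scl=Pin(22), sda=Pin(21), freq=LM75.FREQUENCY)
sensor = LM75(i2c)
temp_c, point = sensor.get_temp()
print(temp_c, point)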
1c499a6ec3f70ca7497e6aebb8e03ced8e3f52ca
| 91
|
py
|
Python
|
framenet/apps.py
|
henryyang42/lifelog_annotation
|
586f44132508f59e97dda701bd5602d26b79a6f4
|
[
"MIT"
] | null | null | null |
framenet/apps.py
|
henryyang42/lifelog_annotation
|
586f44132508f59e97dda701bd5602d26b79a6f4
|
[
"MIT"
] | null | null | null |
framenet/apps.py
|
henryyang42/lifelog_annotation
|
586f44132508f59e97dda701bd5602d26b79a6f4
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class FramenetConfig(AppConfig):
name = 'framenet'
| 15.166667
| 33
| 0.758242
|
from django.apps import AppConfig
class FramenetConfig(AppConfig):
name = 'framenet'
| true
| true
|
1c499b1f6bad0e0e9467d88057d523b47ce4dcbc
| 1,301
|
py
|
Python
|
notifications/utils/models/tests.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | 2
|
2022-01-24T23:30:18.000Z
|
2022-01-26T00:21:22.000Z
|
notifications/utils/models/tests.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | null | null | null |
notifications/utils/models/tests.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | null | null | null |
"""
Created: 10 June 2020
"""
from revibe._helpers.test import RevibeTestCase
from notifications.models import Notification
from notifications.utils.models.notification import create_notification_uuid, mark_email_as_read
# -----------------------------------------------------------------------------
class NotificationUtilsTestCase(RevibeTestCase):
def setUp(self):
self._get_application()
self._get_user()
self._get_external_event()
self._get_external_event_template()
def test_create_notification_uuid(self):
# create notifications with IDs
pass
def test_mark_email_as_read(self):
# create notification
notif_tracking_id = create_notification_uuid()
Notification.objects.create(event_template=self.external_event_template, user=self.user, read_id=notif_tracking_id)
# call function
mark_email_as_read(notif_tracking_id)
# check stuff
notif = Notification.objects.get(read_id=notif_tracking_id)
self.assertTrue(
notif.seen,
msg="Notification 'seen' field has not been changed"
)
self.assertEqual(
bool(notif.date_seen), True,
msg="The notification 'date_seen' field has not been updated"
)
| 28.282609
| 123
| 0.651038
|
from revibe._helpers.test import RevibeTestCase
from notifications.models import Notification
from notifications.utils.models.notification import create_notification_uuid, mark_email_as_read
class NotificationUtilsTestCase(RevibeTestCase):
def setUp(self):
self._get_application()
self._get_user()
self._get_external_event()
self._get_external_event_template()
def test_create_notification_uuid(self):
pass
def test_mark_email_as_read(self):
notif_tracking_id = create_notification_uuid()
Notification.objects.create(event_template=self.external_event_template, user=self.user, read_id=notif_tracking_id)
mark_email_as_read(notif_tracking_id)
notif = Notification.objects.get(read_id=notif_tracking_id)
self.assertTrue(
notif.seen,
msg="Notification 'seen' field has not been changed"
)
self.assertEqual(
bool(notif.date_seen), True,
msg="The notification 'date_seen' field has not been updated"
)
| true
| true
|
1c499fc2f86c8a8a004cb58ecc8b62a1fa49790d
| 485
|
py
|
Python
|
parsl/tests/low_latency/utils.py
|
cylondata/parsl
|
00ff9372bd841dafef8a0b3566c79ffe68f0e367
|
[
"Apache-2.0"
] | 323
|
2017-07-28T21:31:27.000Z
|
2022-03-05T13:06:05.000Z
|
parsl/tests/low_latency/utils.py
|
cylondata/parsl
|
00ff9372bd841dafef8a0b3566c79ffe68f0e367
|
[
"Apache-2.0"
] | 1,286
|
2017-06-01T16:50:00.000Z
|
2022-03-31T16:45:14.000Z
|
parsl/tests/low_latency/utils.py
|
cylondata/parsl
|
00ff9372bd841dafef8a0b3566c79ffe68f0e367
|
[
"Apache-2.0"
] | 113
|
2017-06-03T11:38:40.000Z
|
2022-03-26T16:43:05.000Z
|
import subprocess
def ping_time(ip, n=5):
"""
Returns the average ping time in microseconds.
Note: This function is inherently platform specific.
It currently works on Midway.
"""
cmd = "ping {} -c {}".format(ip, n)
p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
    output = p.communicate()[0].decode()
    stats = output.strip().split("\n")[-1].split(" = ")[-1].split("/")
avg_ping_time = float(stats[1]) # In ms
return avg_ping_time * 1000
| 28.529412
| 64
| 0.62268
|
import subprocess
def ping_time(ip, n=5):
cmd = "ping {} -c {}".format(ip, n)
p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
    output = p.communicate()[0].decode()
    stats = output.strip().split("\n")[-1].split(" = ")[-1].split("/")
avg_ping_time = float(stats[1])
return avg_ping_time * 1000
| true
| true
|
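For reference, a standalone sketch of the parsing step above, applied to a sample summary line in the format printed by Linux ping (the numbers are made up):

line = "rtt min/avg/max/mdev = 0.045/0.062/0.081/0.012 ms"
stats = line.split(" = ")[-1].split("/")
avg_ms = float(stats[1])   # the avg field, in ms
print(avg_ms * 1000)       # converted to microseconds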
1c49a0de93121462a77b6c4abf13849b5433cd52
| 4,847
|
py
|
Python
|
chapter07/detect_car_bow_svm_sliding_window.py
|
insoo223/openCVhowse
|
d8885ab4f87a9d577fd660e60d41222dc2156332
|
[
"BSD-3-Clause"
] | 286
|
2019-06-29T11:47:40.000Z
|
2022-03-29T08:41:28.000Z
|
chapter07/detect_car_bow_svm_sliding_window.py
|
insoo223/openCVhowse
|
d8885ab4f87a9d577fd660e60d41222dc2156332
|
[
"BSD-3-Clause"
] | 8
|
2020-10-01T17:48:04.000Z
|
2022-03-26T04:27:06.000Z
|
chapter07/detect_car_bow_svm_sliding_window.py
|
insoo223/openCVhowse
|
d8885ab4f87a9d577fd660e60d41222dc2156332
|
[
"BSD-3-Clause"
] | 153
|
2019-07-01T02:53:02.000Z
|
2022-03-28T08:43:44.000Z
|
import cv2
import numpy as np
import os
from non_max_suppression import non_max_suppression_fast as nms
if not os.path.isdir('CarData'):
print('CarData folder not found. Please download and unzip '
'http://l2r.cs.uiuc.edu/~cogcomp/Data/Car/CarData.tar.gz '
'or https://github.com/gcr/arc-evaluator/raw/master/CarData.tar.gz '
'into the same folder as this script.')
exit(1)
BOW_NUM_TRAINING_SAMPLES_PER_CLASS = 10
SVM_NUM_TRAINING_SAMPLES_PER_CLASS = 100
SVM_SCORE_THRESHOLD = 1.8
NMS_OVERLAP_THRESHOLD = 0.15
sift = cv2.xfeatures2d.SIFT_create()
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = {}
flann = cv2.FlannBasedMatcher(index_params, search_params)
bow_kmeans_trainer = cv2.BOWKMeansTrainer(12)
bow_extractor = cv2.BOWImgDescriptorExtractor(sift, flann)
def get_pos_and_neg_paths(i):
pos_path = 'CarData/TrainImages/pos-%d.pgm' % (i+1)
neg_path = 'CarData/TrainImages/neg-%d.pgm' % (i+1)
return pos_path, neg_path
def add_sample(path):
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
keypoints, descriptors = sift.detectAndCompute(img, None)
if descriptors is not None:
bow_kmeans_trainer.add(descriptors)
for i in range(BOW_NUM_TRAINING_SAMPLES_PER_CLASS):
pos_path, neg_path = get_pos_and_neg_paths(i)
add_sample(pos_path)
add_sample(neg_path)
voc = bow_kmeans_trainer.cluster()
bow_extractor.setVocabulary(voc)
def extract_bow_descriptors(img):
features = sift.detect(img)
return bow_extractor.compute(img, features)
training_data = []
training_labels = []
for i in range(SVM_NUM_TRAINING_SAMPLES_PER_CLASS):
pos_path, neg_path = get_pos_and_neg_paths(i)
pos_img = cv2.imread(pos_path, cv2.IMREAD_GRAYSCALE)
pos_descriptors = extract_bow_descriptors(pos_img)
if pos_descriptors is not None:
training_data.extend(pos_descriptors)
training_labels.append(1)
neg_img = cv2.imread(neg_path, cv2.IMREAD_GRAYSCALE)
neg_descriptors = extract_bow_descriptors(neg_img)
if neg_descriptors is not None:
training_data.extend(neg_descriptors)
training_labels.append(-1)
svm = cv2.ml.SVM_create()
svm.setType(cv2.ml.SVM_C_SVC)
svm.setC(50)
svm.train(np.array(training_data), cv2.ml.ROW_SAMPLE,
np.array(training_labels))
def pyramid(img, scale_factor=1.25, min_size=(200, 80),
max_size=(600, 600)):
h, w = img.shape
min_w, min_h = min_size
max_w, max_h = max_size
while w >= min_w and h >= min_h:
if w <= max_w and h <= max_h:
yield img
w /= scale_factor
h /= scale_factor
img = cv2.resize(img, (int(w), int(h)),
interpolation=cv2.INTER_AREA)
def sliding_window(img, step=20, window_size=(100, 40)):
img_h, img_w = img.shape
window_w, window_h = window_size
    for y in range(0, img_h, step):
        for x in range(0, img_w, step):
roi = img[y:y+window_h, x:x+window_w]
roi_h, roi_w = roi.shape
if roi_w == window_w and roi_h == window_h:
yield (x, y, roi)
for test_img_path in ['CarData/TestImages/test-0.pgm',
'CarData/TestImages/test-1.pgm',
'../images/car.jpg',
'../images/haying.jpg',
'../images/statue.jpg',
'../images/woodcutters.jpg']:
img = cv2.imread(test_img_path)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
pos_rects = []
for resized in pyramid(gray_img):
for x, y, roi in sliding_window(resized):
descriptors = extract_bow_descriptors(roi)
if descriptors is None:
continue
prediction = svm.predict(descriptors)
if prediction[1][0][0] == 1.0:
raw_prediction = svm.predict(
descriptors, flags=cv2.ml.STAT_MODEL_RAW_OUTPUT)
score = -raw_prediction[1][0][0]
if score > SVM_SCORE_THRESHOLD:
h, w = roi.shape
scale = gray_img.shape[0] / float(resized.shape[0])
pos_rects.append([int(x * scale),
int(y * scale),
int((x+w) * scale),
int((y+h) * scale),
score])
pos_rects = nms(np.array(pos_rects), NMS_OVERLAP_THRESHOLD)
for x0, y0, x1, y1, score in pos_rects:
cv2.rectangle(img, (int(x0), int(y0)), (int(x1), int(y1)),
(0, 255, 255), 2)
text = '%.2f' % score
cv2.putText(img, text, (int(x0), int(y0) - 20),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
cv2.imshow(test_img_path, img)
cv2.waitKey(0)
| 36.443609
| 78
| 0.621003
|
import cv2
import numpy as np
import os
from non_max_suppression import non_max_suppression_fast as nms
if not os.path.isdir('CarData'):
print('CarData folder not found. Please download and unzip '
'http://l2r.cs.uiuc.edu/~cogcomp/Data/Car/CarData.tar.gz '
'or https://github.com/gcr/arc-evaluator/raw/master/CarData.tar.gz '
'into the same folder as this script.')
exit(1)
BOW_NUM_TRAINING_SAMPLES_PER_CLASS = 10
SVM_NUM_TRAINING_SAMPLES_PER_CLASS = 100
SVM_SCORE_THRESHOLD = 1.8
NMS_OVERLAP_THRESHOLD = 0.15
sift = cv2.xfeatures2d.SIFT_create()
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = {}
flann = cv2.FlannBasedMatcher(index_params, search_params)
bow_kmeans_trainer = cv2.BOWKMeansTrainer(12)
bow_extractor = cv2.BOWImgDescriptorExtractor(sift, flann)
def get_pos_and_neg_paths(i):
pos_path = 'CarData/TrainImages/pos-%d.pgm' % (i+1)
neg_path = 'CarData/TrainImages/neg-%d.pgm' % (i+1)
return pos_path, neg_path
def add_sample(path):
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
keypoints, descriptors = sift.detectAndCompute(img, None)
if descriptors is not None:
bow_kmeans_trainer.add(descriptors)
for i in range(BOW_NUM_TRAINING_SAMPLES_PER_CLASS):
pos_path, neg_path = get_pos_and_neg_paths(i)
add_sample(pos_path)
add_sample(neg_path)
voc = bow_kmeans_trainer.cluster()
bow_extractor.setVocabulary(voc)
def extract_bow_descriptors(img):
features = sift.detect(img)
return bow_extractor.compute(img, features)
training_data = []
training_labels = []
for i in range(SVM_NUM_TRAINING_SAMPLES_PER_CLASS):
pos_path, neg_path = get_pos_and_neg_paths(i)
pos_img = cv2.imread(pos_path, cv2.IMREAD_GRAYSCALE)
pos_descriptors = extract_bow_descriptors(pos_img)
if pos_descriptors is not None:
training_data.extend(pos_descriptors)
training_labels.append(1)
neg_img = cv2.imread(neg_path, cv2.IMREAD_GRAYSCALE)
neg_descriptors = extract_bow_descriptors(neg_img)
if neg_descriptors is not None:
training_data.extend(neg_descriptors)
training_labels.append(-1)
svm = cv2.ml.SVM_create()
svm.setType(cv2.ml.SVM_C_SVC)
svm.setC(50)
svm.train(np.array(training_data), cv2.ml.ROW_SAMPLE,
np.array(training_labels))
def pyramid(img, scale_factor=1.25, min_size=(200, 80),
max_size=(600, 600)):
h, w = img.shape
min_w, min_h = min_size
max_w, max_h = max_size
while w >= min_w and h >= min_h:
if w <= max_w and h <= max_h:
yield img
w /= scale_factor
h /= scale_factor
img = cv2.resize(img, (int(w), int(h)),
interpolation=cv2.INTER_AREA)
def sliding_window(img, step=20, window_size=(100, 40)):
img_h, img_w = img.shape
window_w, window_h = window_size
    for y in range(0, img_h, step):
        for x in range(0, img_w, step):
roi = img[y:y+window_h, x:x+window_w]
roi_h, roi_w = roi.shape
if roi_w == window_w and roi_h == window_h:
yield (x, y, roi)
for test_img_path in ['CarData/TestImages/test-0.pgm',
'CarData/TestImages/test-1.pgm',
'../images/car.jpg',
'../images/haying.jpg',
'../images/statue.jpg',
'../images/woodcutters.jpg']:
img = cv2.imread(test_img_path)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
pos_rects = []
for resized in pyramid(gray_img):
for x, y, roi in sliding_window(resized):
descriptors = extract_bow_descriptors(roi)
if descriptors is None:
continue
prediction = svm.predict(descriptors)
if prediction[1][0][0] == 1.0:
raw_prediction = svm.predict(
descriptors, flags=cv2.ml.STAT_MODEL_RAW_OUTPUT)
score = -raw_prediction[1][0][0]
if score > SVM_SCORE_THRESHOLD:
h, w = roi.shape
scale = gray_img.shape[0] / float(resized.shape[0])
pos_rects.append([int(x * scale),
int(y * scale),
int((x+w) * scale),
int((y+h) * scale),
score])
pos_rects = nms(np.array(pos_rects), NMS_OVERLAP_THRESHOLD)
for x0, y0, x1, y1, score in pos_rects:
cv2.rectangle(img, (int(x0), int(y0)), (int(x1), int(y1)),
(0, 255, 255), 2)
text = '%.2f' % score
cv2.putText(img, text, (int(x0), int(y0) - 20),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
cv2.imshow(test_img_path, img)
cv2.waitKey(0)
| true
| true
|
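A quick sanity check of the pyramid and sliding-window generators defined above, run on a synthetic grayscale image instead of the CarData set (it reuses the functions from this file, so cv2 must still be importable):

import numpy as np

img = np.zeros((240, 320), dtype=np.uint8)
num_windows = sum(1 for _ in sliding_window(img, step=20,
                                            window_size=(100, 40)))
num_levels = sum(1 for _ in pyramid(img, scale_factor=1.25,
                                    min_size=(200, 80), max_size=(600, 600)))
print(num_windows, num_levels)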
1c49a501a055fdf11fd4806f3310e16640bf3419
| 1,209
|
py
|
Python
|
experiments_dikower/controllers/drlbox/net/q_net.py
|
prokhn/onti-2019-bigdata
|
b9296141958f544177388be94072efce7bdc7814
|
[
"MIT"
] | null | null | null |
experiments_dikower/controllers/drlbox/net/q_net.py
|
prokhn/onti-2019-bigdata
|
b9296141958f544177388be94072efce7bdc7814
|
[
"MIT"
] | null | null | null |
experiments_dikower/controllers/drlbox/net/q_net.py
|
prokhn/onti-2019-bigdata
|
b9296141958f544177388be94072efce7bdc7814
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from drlbox.common.namescope import TF_NAMESCOPE
from drlbox.net.net_base import RLNet
class QNet(RLNet):
def set_model(self, model):
self.model = model
self.weights = model.weights
self.ph_state, = model.inputs
self.tf_values, = model.outputs
def set_loss(self):
with tf.name_scope(TF_NAMESCOPE):
ph_action = tf.placeholder(tf.int32, [None])
onehot_act = tf.one_hot(ph_action, depth=self.tf_values.shape[1])
ph_target = tf.placeholder(tf.float32, [None])
value_act = tf.reduce_sum(self.tf_values * onehot_act, axis=1)
# loss
self.tf_loss = tf.losses.huber_loss(ph_target, value_act,
reduction=tf.losses.Reduction.NONE)
# error for prioritization: abs td error
self.tf_error = tf.abs(ph_target - value_act)
# kfac loss list
self.kfac_loss_list = [('normal_predictive', (self.tf_values,))]
# placeholder list
self.ph_train_list = [self.ph_state, ph_action, ph_target]
def action_values(self, state):
return self.sess.run(self.tf_values, feed_dict={self.ph_state: state})
| 31.815789
| 78
| 0.643507
|
import tensorflow as tf
from drlbox.common.namescope import TF_NAMESCOPE
from drlbox.net.net_base import RLNet
class QNet(RLNet):
def set_model(self, model):
self.model = model
self.weights = model.weights
self.ph_state, = model.inputs
self.tf_values, = model.outputs
def set_loss(self):
with tf.name_scope(TF_NAMESCOPE):
ph_action = tf.placeholder(tf.int32, [None])
onehot_act = tf.one_hot(ph_action, depth=self.tf_values.shape[1])
ph_target = tf.placeholder(tf.float32, [None])
value_act = tf.reduce_sum(self.tf_values * onehot_act, axis=1)
self.tf_loss = tf.losses.huber_loss(ph_target, value_act,
reduction=tf.losses.Reduction.NONE)
self.tf_error = tf.abs(ph_target - value_act)
self.kfac_loss_list = [('normal_predictive', (self.tf_values,))]
self.ph_train_list = [self.ph_state, ph_action, ph_target]
def action_values(self, state):
return self.sess.run(self.tf_values, feed_dict={self.ph_state: state})
| true
| true
|
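Outside TensorFlow, the one-hot gather in QNet.set_loss above reduces to a simple masked sum. A small numpy sketch:

import numpy as np

q_values = np.array([[1.0, 2.0, 3.0],
                     [4.0, 5.0, 6.0]])
actions = np.array([2, 0])                     # action taken in each row
onehot = np.eye(q_values.shape[1])[actions]    # same role as tf.one_hot
value_act = (q_values * onehot).sum(axis=1)    # Q-value of the taken action
assert np.allclose(value_act, [3.0, 4.0])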
1c49a67e7bb4ec8b16df469d4c6b5c559bb2054a
| 493
|
py
|
Python
|
dostaweemvse/dostaweemvse/models/order.py
|
ale3otik/DostaweemWse
|
0887d47cbe5fba30c3c2b0ecf064d151efd961d0
|
[
"MIT"
] | 3
|
2017-12-10T17:41:22.000Z
|
2017-12-12T20:27:31.000Z
|
dostaweemvse/dostaweemvse/models/order.py
|
ale3otik/DostaweemWse
|
0887d47cbe5fba30c3c2b0ecf064d151efd961d0
|
[
"MIT"
] | null | null | null |
dostaweemvse/dostaweemvse/models/order.py
|
ale3otik/DostaweemWse
|
0887d47cbe5fba30c3c2b0ecf064d151efd961d0
|
[
"MIT"
] | null | null | null |
from django.db import models
from .route import Route
from .location import Location
class Order(models.Model):
route = models.ForeignKey(Route, on_delete=models.CASCADE)
metadata = models.CharField(max_length=50)
from_location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='location3')
to_location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='location4')
max_cost = models.IntegerField()
weight = models.IntegerField()
| 41.083333
| 99
| 0.774848
|
from django.db import models
from .route import Route
from .location import Location
class Order(models.Model):
route = models.ForeignKey(Route, on_delete=models.CASCADE)
metadata = models.CharField(max_length=50)
from_location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='location3')
to_location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='location4')
max_cost = models.IntegerField()
weight = models.IntegerField()
| true
| true
|
1c49a68dfda9e081e60939541072bfa57b8b2ac7
| 99
|
py
|
Python
|
qiniuFolderSync/utils.py
|
ipconfiger/qiniuFolderSync
|
0e1362bb3dda6ca898040f8e019e712d6ed1db6b
|
[
"MIT"
] | null | null | null |
qiniuFolderSync/utils.py
|
ipconfiger/qiniuFolderSync
|
0e1362bb3dda6ca898040f8e019e712d6ed1db6b
|
[
"MIT"
] | null | null | null |
qiniuFolderSync/utils.py
|
ipconfiger/qiniuFolderSync
|
0e1362bb3dda6ca898040f8e019e712d6ed1db6b
|
[
"MIT"
] | null | null | null |
# coding=utf8
import os
def getpath(dir_path, *path):
return os.path.join(dir_path, *path)
| 11
| 40
| 0.686869
|
import os
def getpath(dir_path, *path):
return os.path.join(dir_path, *path)
| true
| true
|
1c49a6bad5a201d8712133721c743ef53f0ea197
| 2,275
|
py
|
Python
|
tests/unit/flow/test_flow_before_after.py
|
afizs/jina
|
52c554c2d593e24129e86dfe3c71bf04f1495082
|
[
"Apache-2.0"
] | 3
|
2021-07-30T09:47:54.000Z
|
2021-07-31T22:29:20.000Z
|
tests/unit/flow/test_flow_before_after.py
|
sheetal01761/jina
|
520fc0794fb43d96e1fc85534e9df3cf9c89c42e
|
[
"Apache-2.0"
] | 2
|
2021-07-14T14:07:18.000Z
|
2022-02-06T05:00:41.000Z
|
tests/unit/flow/test_flow_before_after.py
|
sheetal01761/jina
|
520fc0794fb43d96e1fc85534e9df3cf9c89c42e
|
[
"Apache-2.0"
] | 2
|
2021-10-06T07:28:11.000Z
|
2021-11-18T20:20:18.000Z
|
import pytest
from jina import Executor, requests, __default_executor__
from jina import Flow
from tests import random_docs
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 1
assert f.num_peas == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_before=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 2
assert f.num_peas == 3
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_after(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_after=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 2
assert f.num_peas == 3
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_default_before_after_is_ignored(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(
uses_after=__default_executor__, uses_before=__default_executor__, name='p1'
)
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 1
assert f.num_peas == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before_after(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_before=MyExec, uses_after=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 3
assert f.num_peas == 4
| 26.149425
| 85
| 0.627692
|
import pytest
from jina import Executor, requests, __default_executor__
from jina import Flow
from tests import random_docs
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 1
assert f.num_peas == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_before=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 2
assert f.num_peas == 3
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_after(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_after=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 2
assert f.num_peas == 3
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_default_before_after_is_ignored(protocol):
docs = random_docs(10)
f = Flow(protocol=protocol).add(
uses_after=__default_executor__, uses_before=__default_executor__, name='p1'
)
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 1
assert f.num_peas == 2
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_before_after(protocol):
class MyExec(Executor):
@requests
def foo(self, **kwargs):
pass
docs = random_docs(10)
f = Flow(protocol=protocol).add(uses_before=MyExec, uses_after=MyExec, name='p1')
with f:
f.index(docs)
assert f.num_pods == 2
assert f._pod_nodes['p1'].num_peas == 3
assert f.num_peas == 4
| true
| true
|
1c49a6c86e3fa57ac97eba6d634a4f64dce2ee44
| 654
|
py
|
Python
|
runs/snort/10KB/src1-tgt1/ssl-par-max-iter00200.cfg.py
|
Largio/broeval
|
89e831d07f066100afdd1a5b220f9f08f1c10b3d
|
[
"MIT"
] | null | null | null |
runs/snort/10KB/src1-tgt1/ssl-par-max-iter00200.cfg.py
|
Largio/broeval
|
89e831d07f066100afdd1a5b220f9f08f1c10b3d
|
[
"MIT"
] | null | null | null |
runs/snort/10KB/src1-tgt1/ssl-par-max-iter00200.cfg.py
|
Largio/broeval
|
89e831d07f066100afdd1a5b220f9f08f1c10b3d
|
[
"MIT"
] | null | null | null |
# Write results to this file
OUTFILE = 'runs/snort/10KB/src1-tgt1/ssl-par-max-iter00200.result.csv'
# Source computers for the request
SOURCE = ['10.0.0.1']
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# IDS Mode. (ATM: noids, min, max, http, ssl, ftp, icmp, mysql)
IDSMODE = 'max'
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 200
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 4
# Protocol to be used e.g. HTTP, SSL, FTP, MYSQL
PROTOCOL = 'ssl'
| 24.222222
| 70
| 0.701835
|
OUTFILE = 'runs/snort/10KB/src1-tgt1/ssl-par-max-iter00200.result.csv'
SOURCE = ['10.0.0.1']
TARGET = ['10.0.0.2']
IDSMODE = 'max'
MODE = 'par'
EPOCHS = 100
ITER = 200
SIZE = 4
PROTOCOL = 'ssl'
| true
| true
|
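Reading the SIZE comment above as "the downloaded file is roughly 10**SIZE bytes" (an interpretation, not stated elsewhere in the config), SIZE = 4 matches the 10KB in the run path:

SIZE = 4
print(10 ** SIZE)  # 10000 bytes, i.e. about 10 KB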
1c49a6f7de9175a674e3e1ba8b8c5f27a3fcd695
| 290
|
py
|
Python
|
1138_05_12-merge.py
|
nchaparr/Geospatial-Analysis-with-Python
|
6e0d1ff429baa4205c63bf842ab950ed4176536f
|
[
"CC0-1.0"
] | null | null | null |
1138_05_12-merge.py
|
nchaparr/Geospatial-Analysis-with-Python
|
6e0d1ff429baa4205c63bf842ab950ed4176536f
|
[
"CC0-1.0"
] | null | null | null |
1138_05_12-merge.py
|
nchaparr/Geospatial-Analysis-with-Python
|
6e0d1ff429baa4205c63bf842ab950ed4176536f
|
[
"CC0-1.0"
] | null | null | null |
"""Merge multiple shapefiles"""
import glob
import shapefile
files = glob.glob("footprints_*shp")
w = shapefile.Writer()
r = None
for f in files:
r = shapefile.Reader(f)
w._shapes.extend(r.shapes())
w.records.extend(r.records())
w.fields = list(r.fields)
w.save("Merged")
| 22.307692
| 37
| 0.675862
|
import glob
import shapefile
files = glob.glob("footprints_*shp")
w = shapefile.Writer()
r = None
for f in files:
r = shapefile.Reader(f)
w._shapes.extend(r.shapes())
w.records.extend(r.records())
w.fields = list(r.fields)
w.save("Merged")
| true
| true
|
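The merge above targets the legacy pyshp 1.x API (a no-argument Writer, the private _shapes list, and save()). Under pyshp 2.x the equivalent would look roughly like this sketch, assuming 2.x's context-manager support:

import glob
import shapefile

files = glob.glob("footprints_*shp")
with shapefile.Writer("Merged") as w:
    for i, f in enumerate(files):
        with shapefile.Reader(f) as r:
            if i == 0:
                w.fields = list(r.fields[1:])  # skip the DeletionFlag field
            for shaperec in r.iterShapeRecords():
                w.record(*shaperec.record)
                w.shape(shaperec.shape)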
1c49a7f9ce091fecb34b660858eb1cfba5796214
| 7,240
|
py
|
Python
|
pynot/tests.py
|
adamora/pynot
|
47abb7e9db85301a976c012380e3963c64590414
|
[
"Apache-2.0"
] | 6
|
2018-09-15T08:05:34.000Z
|
2019-01-19T22:51:27.000Z
|
pynot/tests.py
|
intelligenia/pynot
|
47abb7e9db85301a976c012380e3963c64590414
|
[
"Apache-2.0"
] | null | null | null |
pynot/tests.py
|
intelligenia/pynot
|
47abb7e9db85301a976c012380e3963c64590414
|
[
"Apache-2.0"
] | null | null | null |
from django.test import TestCase
from pynot.models import *
from pynot.factories import *
from rest_assured.testcases import *
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token
from rest_framework import serializers
class CategoryTestCase(ReadRESTAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'category'
factory_class = CategoryFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name']
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="admin@example.com", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
models.PyNot.sync_settings()
super(CategoryTestCase, self).setUp()
class EventTestCase(DetailAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'event'
factory_class = EventFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name', 'description']
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="admin@example.com", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
super(EventTestCase, self).setUp()
class ParameterTestSerializer(serializers.ModelSerializer):
class Meta:
model = Event
fields = ('id', 'name')
extra_fields_human_name = {'id':'ID',
'name':'Nombre'}
extra_fields_email = ('name',)
class EventTestSerializer(serializers.ModelSerializer):
parameters = ParameterTestSerializer(many=True)
class Meta:
model = Event
fields = ('id', 'name', 'description', 'parameters')
extra_fields_human_name = {'id':'ID',
'name':'Nombre',
'description':u'Descripción',
'parameters':u'Parámetros'}
extra_fields_group = ('id',)
class CategoryTestSerializer(serializers.ModelSerializer):
events = EventTestSerializer(many=True)
class Meta:
model = Category
fields = ('id', 'name', 'events')
extra_fields_human_name = {'name':'Nombre',
'events':'Eventos'}
extra_fields_email = ('name',)
extra_fields_user = ('id',)
class ParameterTestCase(TestCase):
category = None
event = None
parameter = None
notification = None
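    # Fixture graph: one category with two events; the first event carries
    # a notification whose recipients mix a literal email address, a
    # serializer path (param_name.events.parameters.name) and user ids, so
    # the data_* and fire() tests below cover every recipient type.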
def setUp(self):
self.category = CategoryFactory.create(name='cat_name')
self.event = EventFactory.create(category=self.category,
name='event_name',
slug='slug_event')
event = EventFactory.create(category=self.category,
name='event_name2')
self.parameter = ParameterFactory.create(event=self.event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name',
human_name='Categoria')
parameter = ParameterFactory.create(event=event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name2',
human_name='Parametro 2')
parameter = ParameterFactory.create(event=event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name3',
human_name='Parametro 3')
self.notification = EventNotificationFactory.create(event=self.event,
name='Mensaje de alta de usuario',
message='El nombre de la categoria es param_name.name')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='test@test.com',
type='email')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='param_name.events.parameters.name',
type='email')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='1',
type='user')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='2',
type='user')
def test_get_serializer_data_body(self):
data_body = self.parameter.data_body
self.assertEqual(data_body["id"]["human_name"], "id")
self.assertTrue('events' not in data_body)
def test_get_serializer_data_emails(self):
data_email = self.parameter.data_email
self.assertTrue("id" not in data_email)
self.assertEqual(data_email["events"]["data"]["parameters"]\
["data"]["name"]["human_name"], "Nombre")
def test_get_serializer_data_users(self):
data_user = self.parameter.data_user
self.assertTrue("name" not in data_user)
self.assertTrue("id" in data_user)
def test_get_serializer_data_groups(self):
data_group = self.parameter.data_group
self.assertTrue("id" not in data_group)
self.assertEqual(data_group["events"]["data"]["id"]["human_name"],
"ID")
def test_fire(self):
self.event.fire(param_name=CategoryTestSerializer(self.category))
self.assertEqual(models.EventNotificationFire.objects.all().count(), 1)
self.assertEqual(models.Notification.objects.all().count(), 6)
self.event.fire(param_name=self.category)
self.assertEqual(models.EventNotificationFire.objects.all().count(), 2)
self.notification.collective = True
self.notification.save()
PyNot.event('slug_event').fire(param_name=self.category)
self.assertEqual(models.EventNotificationFire.objects.all().count(), 3)
self.assertEqual(models.Notification.objects.all().count(), 17) # 6 + 6 + 5 (collective one)
class EventNotificationTestCase(DetailAPITestCaseMixin,
WriteRESTAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'eventnotification'
factory_class = EventNotificationFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name', 'subject', 'message']
create_data = {'name' : 'Test notification',
'subject' : 'Test notification',
'message' : 'Test notification'}
update_data = {'name': 'Test notification updated',
'subject': 'Test notification updated',
'message': 'Test notification updated',
'recipients': []}
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="admin@example.com", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
super(EventNotificationTestCase, self).setUp()
def get_create_data(self):
data = self.create_data
data['event']=self.object.event_id
return data
| 36.565657
| 100
| 0.623895
|
from django.test import TestCase
from pynot.models import *
from pynot.factories import *
from rest_assured.testcases import *
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token
from rest_framework import serializers
class CategoryTestCase(ReadRESTAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'category'
factory_class = CategoryFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name']
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="admin@example.com", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
models.PyNot.sync_settings()
super(CategoryTestCase, self).setUp()
class EventTestCase(DetailAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'event'
factory_class = EventFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name', 'description']
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="admin@example.com", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
super(EventTestCase, self).setUp()
class ParameterTestSerializer(serializers.ModelSerializer):
class Meta:
model = Event
fields = ('id', 'name')
extra_fields_human_name = {'id':'ID',
'name':'Nombre'}
extra_fields_email = ('name',)
class EventTestSerializer(serializers.ModelSerializer):
parameters = ParameterTestSerializer(many=True)
class Meta:
model = Event
fields = ('id', 'name', 'description', 'parameters')
extra_fields_human_name = {'id':'ID',
'name':'Nombre',
'description':u'Descripción',
'parameters':u'Parámetros'}
extra_fields_group = ('id',)
class CategoryTestSerializer(serializers.ModelSerializer):
events = EventTestSerializer(many=True)
class Meta:
model = Category
fields = ('id', 'name', 'events')
extra_fields_human_name = {'name':'Nombre',
'events':'Eventos'}
extra_fields_email = ('name',)
extra_fields_user = ('id',)
class ParameterTestCase(TestCase):
category = None
event = None
parameter = None
notification = None
def setUp(self):
self.category = CategoryFactory.create(name='cat_name')
self.event = EventFactory.create(category=self.category,
name='event_name',
slug='slug_event')
event = EventFactory.create(category=self.category,
name='event_name2')
self.parameter = ParameterFactory.create(event=self.event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name',
human_name='Categoria')
parameter = ParameterFactory.create(event=event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name2',
human_name='Parametro 2')
parameter = ParameterFactory.create(event=event,
serializer='pynot.tests.CategoryTestSerializer',
name='param_name3',
human_name='Parametro 3')
self.notification = EventNotificationFactory.create(event=self.event,
name='Mensaje de alta de usuario',
message='El nombre de la categoria es param_name.name')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='test@test.com',
type='email')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='param_name.events.parameters.name',
type='email')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='1',
type='user')
EventNotificationRecipientFactory.create(
notification=self.notification, recipient='2',
type='user')
def test_get_serializer_data_body(self):
data_body = self.parameter.data_body
self.assertEqual(data_body["id"]["human_name"], "id")
self.assertTrue('events' not in data_body)
def test_get_serializer_data_emails(self):
data_email = self.parameter.data_email
self.assertTrue("id" not in data_email)
self.assertEqual(data_email["events"]["data"]["parameters"]\
["data"]["name"]["human_name"], "Nombre")
def test_get_serializer_data_users(self):
data_user = self.parameter.data_user
self.assertTrue("name" not in data_user)
self.assertTrue("id" in data_user)
def test_get_serializer_data_groups(self):
data_group = self.parameter.data_group
self.assertTrue("id" not in data_group)
self.assertEqual(data_group["events"]["data"]["id"]["human_name"],
"ID")
def test_fire(self):
self.event.fire(param_name=CategoryTestSerializer(self.category))
self.assertEqual(models.EventNotificationFire.objects.all().count(), 1)
self.assertEqual(models.Notification.objects.all().count(), 6)
self.event.fire(param_name=self.category)
self.assertEqual(models.EventNotificationFire.objects.all().count(), 2)
self.notification.collective = True
self.notification.save()
PyNot.event('slug_event').fire(param_name=self.category)
self.assertEqual(models.EventNotificationFire.objects.all().count(), 3)
self.assertEqual(models.Notification.objects.all().count(), 17)
class EventNotificationTestCase(DetailAPITestCaseMixin,
WriteRESTAPITestCaseMixin,
BaseRESTAPITestCase):
base_name = 'eventnotification'
factory_class = EventNotificationFactory
lookup_field = 'id'
attributes_to_check = ['id', 'name', 'subject', 'message']
create_data = {'name' : 'Test notification',
'subject' : 'Test notification',
'message' : 'Test notification'}
update_data = {'name': 'Test notification updated',
'subject': 'Test notification updated',
'message': 'Test notification updated',
'recipients': []}
def setUp(self):
admin=get_user_model().objects.create_superuser(
email="admin@example.com", password="admin")
token = Token.objects.get_or_create(user=admin)[0].key
headers = {'HTTP_AUTHORIZATION': 'Token ' + token}
self.client.credentials(**headers)
super(EventNotificationTestCase, self).setUp()
def get_create_data(self):
data = self.create_data
data['event']=self.object.event_id
return data
| true
| true
|
1c49a7fda9f17643c78c4e525ad84da9839557ec
| 7,469
|
py
|
Python
|
FastRCNN/BrainScript/PARAMETERS.py
|
jakkaj/CNTK_AMLWorkbench
|
27a496c665f2565e15450da8743807f528326d5e
|
[
"MIT"
] | 6
|
2017-11-16T21:26:38.000Z
|
2020-05-04T21:06:10.000Z
|
FastRCNN/BrainScript/PARAMETERS.py
|
jakkaj/CNTK_AMLWorkbench
|
27a496c665f2565e15450da8743807f528326d5e
|
[
"MIT"
] | null | null | null |
FastRCNN/BrainScript/PARAMETERS.py
|
jakkaj/CNTK_AMLWorkbench
|
27a496c665f2565e15450da8743807f528326d5e
|
[
"MIT"
] | 2
|
2017-11-22T05:42:27.000Z
|
2018-12-31T11:18:26.000Z
|
from __future__ import print_function
import os
from imdb_data import imdb_data
import fastRCNN, time, datetime
from fastRCNN.pascal_voc import pascal_voc # as nmsPython
print (datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
dataset = "Grocery"
#dataset = "pascalVoc"
#dataset = "pascalVoc_aeroplanesOnly"
#dataset = "CustomDataset"
############################
# default parameters
############################
class Parameters():
def __init__(self, datasetName):
# cntk params
self.datasetName = datasetName
self.cntk_nrRois = 100 # how many ROIs to zero-pad. Use 100 to get quick result. Use 2000 to get good results.
self.cntk_padWidth = 1000
self.cntk_padHeight = 1000
# directories
self.rootDir = os.path.dirname(os.path.abspath(__file__))
self.imgDir = os.path.join(self.rootDir, "..", "..", "..", "DataSets", datasetName)
# derived directories
self.procDir = os.path.join(self.rootDir, "proc", datasetName + "_{}".format(self.cntk_nrRois))
self.resultsDir = os.path.join(self.rootDir, "results", datasetName + "_{}".format(self.cntk_nrRois))
self.roiDir = os.path.join(self.procDir, "rois")
self.cntkFilesDir = os.path.join(self.procDir, "cntkFiles")
self.cntkTemplateDir = self.rootDir
# ROI generation
        self.roi_minDimRel = 0.01      # minimum relative width/height of a ROI
        self.roi_maxDimRel = 1.0       # maximum relative width/height of a ROI
        self.roi_minNrPixelsRel = 0    # minimum relative area covered by ROI
        self.roi_maxNrPixelsRel = 1.0  # maximum relative area covered by ROI
self.roi_maxAspectRatio = 4.0 # maximum aspect Ratio of a ROI vertically and horizontally
self.roi_maxImgDim = 200 # image size used for ROI generation
self.ss_scale = 100 # selective search ROIS: parameter controlling cluster size for segmentation
        self.ss_sigma = 1.2            # selective search ROIs: width of Gaussian kernel for segmentation
self.ss_minSize = 20 # selective search ROIs: minimum component size for segmentation
self.grid_nrScales = 7 # uniform grid ROIs: number of iterations from largest possible ROI to smaller ROIs
self.grid_aspectRatios = [1.0, 2.0, 0.5] # uniform grid ROIs: aspect ratio of ROIs
# thresholds
self.train_posOverlapThres = 0.5 # threshold for marking ROIs as positive.
self.nmsThreshold = 0.3 # Non-Maxima suppression threshold (in range [0,1]).
# The lower the more ROIs will be combined. Used in 5_evaluateResults and 5_visualizeResults.
self.cntk_num_train_images = -1 # set per data set below
self.cntk_num_test_images = -1 # set per data set below
self.cntk_mb_size = -1 # set per data set below
self.cntk_max_epochs = -1 # set per data set below
self.cntk_momentum_time_constant = -1 # set per data set below
############################
# project-specific parameters
############################
class GroceryParameters(Parameters):
def __init__(self, datasetName):
super(GroceryParameters,self).__init__(datasetName)
self.classes = ('__background__', # always index 0
'avocado', 'orange', 'butter', 'champagne', 'eggBox', 'gerkin', 'joghurt', 'ketchup',
'orangeJuice', 'onion', 'pepper', 'tomato', 'water', 'milk', 'tabasco', 'mustard')
# roi generation
self.roi_minDimRel = 0.04
self.roi_maxDimRel = 0.4
self.roi_minNrPixelsRel = 2 * self.roi_minDimRel * self.roi_minDimRel
self.roi_maxNrPixelsRel = 0.33 * self.roi_maxDimRel * self.roi_maxDimRel
# model training / scoring
self.classifier = 'nn'
self.cntk_num_train_images = 25
self.cntk_num_test_images = 5
self.cntk_mb_size = 5
self.cntk_max_epochs = 20
self.cntk_momentum_time_constant = 10
# postprocessing
self.nmsThreshold = 0.01
# database
self.imdbs = dict() # database provider of images and image annotations
for image_set in ["train", "test"]:
self.imdbs[image_set] = imdb_data(image_set, self.classes, self.cntk_nrRois, self.imgDir, self.roiDir, self.cntkFilesDir, boAddGroundTruthRois=(image_set!='test'))
class CustomDataset(Parameters):
def __init__(self, datasetName):
super(CustomDataset,self).__init__(datasetName)
class PascalParameters(Parameters):
def __init__(self, datasetName):
super(PascalParameters,self).__init__(datasetName)
if datasetName.startswith("pascalVoc_aeroplanesOnly"):
self.classes = ('__background__', 'aeroplane')
self.lutImageSet = {"train": "trainval.aeroplaneOnly", "test": "test.aeroplaneOnly"}
else:
self.classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
self.lutImageSet = {"train": "trainval", "test": "test"}
# use cntk_nrRois = 4000. more than 99% of the test images have less than 4000 rois, but 50% more than 2000
# model training / scoring
self.classifier = 'nn'
self.cntk_num_train_images = 5011
self.cntk_num_test_images = 4952
self.cntk_mb_size = 2
self.cntk_max_epochs = 17
self.cntk_momentum_time_constant = 20
self.pascalDataDir = os.path.join(self.rootDir, "..", "..", "DataSets", "Pascal")
self.imgDir = self.pascalDataDir
# database
self.imdbs = dict()
for image_set, year in zip(["train", "test"], ["2007", "2007"]):
self.imdbs[image_set] = fastRCNN.pascal_voc(self.lutImageSet[image_set], year, self.classes, self.cntk_nrRois, cacheDir=self.cntkFilesDir, devkit_path=self.pascalDataDir)
print ("Number of {} images: {}".format(image_set, self.imdbs[image_set].num_images))
def get_parameters_for_dataset(datasetName=dataset):
if datasetName == "Grocery":
parameters = GroceryParameters(datasetName)
elif datasetName.startswith("pascalVoc"):
parameters = PascalParameters(datasetName)
    elif datasetName == "CustomDataset":
parameters = CustomDataset(datasetName)
else:
ERROR
############################
# computed parameters
############################
nrClasses = len(parameters.classes)
parameters.cntk_featureDimensions = {'nn': nrClasses}
parameters.nrClasses = nrClasses
assert parameters.cntk_padWidth == parameters.cntk_padHeight, "ERROR: different width and height for padding currently not supported."
assert parameters.classifier.lower() in ['svm','nn'], "ERROR: only 'nn' or 'svm' classifier supported."
    assert not (parameters.datasetName == 'pascalVoc' and parameters.classifier == 'svm'), "ERROR: while technically possible, writing 2nd-last layer of CNTK model for all pascalVOC images takes too much disk memory."
print ("PARAMETERS: datasetName = " + datasetName)
print ("PARAMETERS: cntk_nrRois = {}".format(parameters.cntk_nrRois))
return parameters
| 49.463576
| 217
| 0.641317
|
from __future__ import print_function
import os
from imdb_data import imdb_data
import fastRCNN, time, datetime
from fastRCNN.pascal_voc import pascal_voc
print (datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
dataset = "Grocery"
class Parameters():
    def __init__(self, datasetName):
        self.datasetName = datasetName
        self.cntk_nrRois = 100
        self.cntk_padWidth = 1000
        self.cntk_padHeight = 1000
        self.rootDir = os.path.dirname(os.path.abspath(__file__))
        self.imgDir = os.path.join(self.rootDir, "..", "..", "..", "DataSets", datasetName)
        self.procDir = os.path.join(self.rootDir, "proc", datasetName + "_{}".format(self.cntk_nrRois))
        self.resultsDir = os.path.join(self.rootDir, "results", datasetName + "_{}".format(self.cntk_nrRois))
        self.roiDir = os.path.join(self.procDir, "rois")
        self.cntkFilesDir = os.path.join(self.procDir, "cntkFiles")
        self.cntkTemplateDir = self.rootDir
        self.roi_minDimRel = 0.01
        self.roi_maxDimRel = 1.0
        self.roi_minNrPixelsRel = 0
        self.roi_maxNrPixelsRel = 1.0
        self.roi_maxAspectRatio = 4.0
        self.roi_maxImgDim = 200
        self.ss_scale = 100
        self.ss_sigma = 1.2
        self.ss_minSize = 20
        self.grid_nrScales = 7
        self.grid_aspectRatios = [1.0, 2.0, 0.5]
        self.train_posOverlapThres = 0.5
        self.nmsThreshold = 0.3
        self.cntk_num_train_images = -1
        self.cntk_num_test_images = -1
        self.cntk_mb_size = -1
        self.cntk_max_epochs = -1
        self.cntk_momentum_time_constant = -1
class GroceryParameters(Parameters):
    def __init__(self, datasetName):
        super(GroceryParameters,self).__init__(datasetName)
        self.classes = ('__background__',
                        'avocado', 'orange', 'butter', 'champagne', 'eggBox', 'gerkin', 'joghurt', 'ketchup',
                        'orangeJuice', 'onion', 'pepper', 'tomato', 'water', 'milk', 'tabasco', 'mustard')
        self.roi_minDimRel = 0.04
        self.roi_maxDimRel = 0.4
        self.roi_minNrPixelsRel = 2 * self.roi_minDimRel * self.roi_minDimRel
        self.roi_maxNrPixelsRel = 0.33 * self.roi_maxDimRel * self.roi_maxDimRel
        self.classifier = 'nn'
        self.cntk_num_train_images = 25
        self.cntk_num_test_images = 5
        self.cntk_mb_size = 5
        self.cntk_max_epochs = 20
        self.cntk_momentum_time_constant = 10
        self.nmsThreshold = 0.01
        self.imdbs = dict()
        for image_set in ["train", "test"]:
            self.imdbs[image_set] = imdb_data(image_set, self.classes, self.cntk_nrRois, self.imgDir, self.roiDir, self.cntkFilesDir, boAddGroundTruthRois=(image_set!='test'))
class CustomDataset(Parameters):
    def __init__(self, datasetName):
        super(CustomDataset,self).__init__(datasetName)
class PascalParameters(Parameters):
    def __init__(self, datasetName):
        super(PascalParameters,self).__init__(datasetName)
        if datasetName.startswith("pascalVoc_aeroplanesOnly"):
            self.classes = ('__background__', 'aeroplane')
            self.lutImageSet = {"train": "trainval.aeroplaneOnly", "test": "test.aeroplaneOnly"}
        else:
            self.classes = ('__background__',
                            'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
                            'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
            self.lutImageSet = {"train": "trainval", "test": "test"}
        self.classifier = 'nn'
        self.cntk_num_train_images = 5011
        self.cntk_num_test_images = 4952
        self.cntk_mb_size = 2
        self.cntk_max_epochs = 17
        self.cntk_momentum_time_constant = 20
        self.pascalDataDir = os.path.join(self.rootDir, "..", "..", "DataSets", "Pascal")
        self.imgDir = self.pascalDataDir
        self.imdbs = dict()
        for image_set, year in zip(["train", "test"], ["2007", "2007"]):
            self.imdbs[image_set] = fastRCNN.pascal_voc(self.lutImageSet[image_set], year, self.classes, self.cntk_nrRois, cacheDir=self.cntkFilesDir, devkit_path=self.pascalDataDir)
            print ("Number of {} images: {}".format(image_set, self.imdbs[image_set].num_images))
def get_parameters_for_dataset(datasetName=dataset):
    if datasetName == "Grocery":
        parameters = GroceryParameters(datasetName)
    elif datasetName.startswith("pascalVoc"):
        parameters = PascalParameters(datasetName)
    elif datasetName == "CustomDataset":
        parameters = CustomDataset(datasetName)
    else:
        ERROR
    nrClasses = len(parameters.classes)
    parameters.cntk_featureDimensions = {'nn': nrClasses}
    parameters.nrClasses = nrClasses
    assert parameters.cntk_padWidth == parameters.cntk_padHeight, "ERROR: different width and height for padding currently not supported."
    assert parameters.classifier.lower() in ['svm','nn'], "ERROR: only 'nn' or 'svm' classifier supported."
    assert not (parameters.datasetName == 'pascalVoc' and parameters.classifier == 'svm'), "ERROR: while technically possible, writing 2nd-last layer of CNTK model for all pascalVOC images takes too much disk memory."
    print ("PARAMETERS: datasetName = " + datasetName)
    print ("PARAMETERS: cntk_nrRois = {}".format(parameters.cntk_nrRois))
    return parameters
| true
| true
|
1c49ab770d5e3e8175eecd2530c65d772f092a4d
| 3,811
|
py
|
Python
|
util/lintlib.py
|
Xanewok/rust-clippy
|
9d1792a4265c3645d716c5bf085c07be8749332a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2019-05-14T09:10:46.000Z
|
2019-05-14T09:10:46.000Z
|
util/lintlib.py
|
Xanewok/rust-clippy
|
9d1792a4265c3645d716c5bf085c07be8749332a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
util/lintlib.py
|
Xanewok/rust-clippy
|
9d1792a4265c3645d716c5bf085c07be8749332a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Common utilities shared by the housekeeping scripts.
import os
import re
import collections
import logging as log
log.basicConfig(level=log.INFO, format='%(levelname)s: %(message)s')
Lint = collections.namedtuple('Lint', 'name level doc sourcefile group')
Config = collections.namedtuple('Config', 'name ty doc default')
lintname_re = re.compile(r'''pub\s+([A-Z_][A-Z_0-9]*)''')
group_re = re.compile(r'''\s*([a-z_][a-z_0-9]+)''')
conf_re = re.compile(r'''define_Conf! {\n([^}]*)\n}''', re.MULTILINE)
confvar_re = re.compile(
r'''/// Lint: (\w+). (.*).*\n\s*\([^,]+,\s+"([^"]+)",\s+([^=\)]+)=>\s+(.*)\),''', re.MULTILINE)
lint_levels = {
"correctness": 'Deny',
"style": 'Warn',
"complexity": 'Warn',
"perf": 'Warn',
"restriction": 'Allow',
"pedantic": 'Allow',
"nursery": 'Allow',
"cargo": 'Allow',
}
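# parse_lints() is a line-oriented state machine: it accumulates the ///
# doc comment preceding each declare_clippy_lint!/declare_deprecated_lint!
# block, then scans ahead for the lint name and its group, deriving the
# default level from lint_levels above.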
def parse_lints(lints, filepath):
last_comment = []
comment = True
clippy = False
deprecated = False
name = ""
with open(filepath) as fp:
for line in fp:
if comment:
if line.startswith("/// "):
last_comment.append(line[4:])
elif line.startswith("///"):
last_comment.append(line[3:])
elif line.startswith("declare_lint!"):
import sys
print("don't use `declare_lint!` in Clippy, use `declare_clippy_lint!` instead")
sys.exit(42)
elif line.startswith("declare_clippy_lint!"):
comment = False
deprecated = False
clippy = True
name = ""
elif line.startswith("declare_deprecated_lint!"):
comment = False
deprecated = True
clippy = False
else:
last_comment = []
if not comment:
m = lintname_re.search(line)
if m:
name = m.group(1).lower()
line = next(fp)
if deprecated:
level = "Deprecated"
group = "deprecated"
else:
while True:
g = group_re.search(line)
if g:
group = g.group(1).lower()
level = lint_levels.get(group, None)
break
line = next(fp)
if level is None:
continue
log.info("found %s with level %s in %s",
name, level, filepath)
lints.append(Lint(name, level, last_comment, filepath, group))
last_comment = []
comment = True
if "}" in line:
                    log.warning("Warning: missing Lint-Name in %s", filepath)
comment = True
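# parse_configs() pulls the define_Conf! block out of utils/conf.rs and
# regex-matches each documented configuration variable into a Config tuple.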
def parse_configs(path):
configs = {}
with open(os.path.join(path, 'utils/conf.rs')) as fp:
contents = fp.read()
match = re.search(conf_re, contents)
confvars = re.findall(confvar_re, match.group(1))
for (lint, doc, name, default, ty) in confvars:
configs[lint.lower()] = Config(name.replace("_", "-"), ty, doc, default)
return configs
def parse_all(path="clippy_lints/src"):
lints = []
for root, dirs, files in os.walk(path):
for fn in files:
if fn.endswith('.rs'):
parse_lints(lints, os.path.join(root, fn))
log.info("got %s lints", len(lints))
configs = parse_configs(path)
log.info("got %d configs", len(configs))
return lints, configs
| 31.758333
| 100
| 0.479402
|
import os
import re
import collections
import logging as log
log.basicConfig(level=log.INFO, format='%(levelname)s: %(message)s')
Lint = collections.namedtuple('Lint', 'name level doc sourcefile group')
Config = collections.namedtuple('Config', 'name ty doc default')
lintname_re = re.compile(r'''pub\s+([A-Z_][A-Z_0-9]*)''')
group_re = re.compile(r'''\s*([a-z_][a-z_0-9]+)''')
conf_re = re.compile(r'''define_Conf! {\n([^}]*)\n}''', re.MULTILINE)
confvar_re = re.compile(
r'''/// Lint: (\w+). (.*).*\n\s*\([^,]+,\s+"([^"]+)",\s+([^=\)]+)=>\s+(.*)\),''', re.MULTILINE)
lint_levels = {
"correctness": 'Deny',
"style": 'Warn',
"complexity": 'Warn',
"perf": 'Warn',
"restriction": 'Allow',
"pedantic": 'Allow',
"nursery": 'Allow',
"cargo": 'Allow',
}
def parse_lints(lints, filepath):
last_comment = []
comment = True
clippy = False
deprecated = False
name = ""
with open(filepath) as fp:
for line in fp:
if comment:
if line.startswith("/// "):
last_comment.append(line[4:])
elif line.startswith("///"):
last_comment.append(line[3:])
elif line.startswith("declare_lint!"):
import sys
print("don't use `declare_lint!` in Clippy, use `declare_clippy_lint!` instead")
sys.exit(42)
elif line.startswith("declare_clippy_lint!"):
comment = False
deprecated = False
clippy = True
name = ""
elif line.startswith("declare_deprecated_lint!"):
comment = False
deprecated = True
clippy = False
else:
last_comment = []
if not comment:
m = lintname_re.search(line)
if m:
name = m.group(1).lower()
line = next(fp)
if deprecated:
level = "Deprecated"
group = "deprecated"
else:
while True:
g = group_re.search(line)
if g:
group = g.group(1).lower()
level = lint_levels.get(group, None)
break
line = next(fp)
if level is None:
continue
log.info("found %s with level %s in %s",
name, level, filepath)
lints.append(Lint(name, level, last_comment, filepath, group))
last_comment = []
comment = True
if "}" in line:
                    log.warning("Warning: missing Lint-Name in %s", filepath)
comment = True
def parse_configs(path):
configs = {}
with open(os.path.join(path, 'utils/conf.rs')) as fp:
contents = fp.read()
match = re.search(conf_re, contents)
confvars = re.findall(confvar_re, match.group(1))
for (lint, doc, name, default, ty) in confvars:
configs[lint.lower()] = Config(name.replace("_", "-"), ty, doc, default)
return configs
def parse_all(path="clippy_lints/src"):
lints = []
for root, dirs, files in os.walk(path):
for fn in files:
if fn.endswith('.rs'):
parse_lints(lints, os.path.join(root, fn))
log.info("got %s lints", len(lints))
configs = parse_configs(path)
log.info("got %d configs", len(configs))
return lints, configs
| true
| true
|
1c49abbc287cae05a51c953706e6233eceda83d3
| 682
|
py
|
Python
|
qiskit/providers/ibmq/exceptions.py
|
delapuente/qiskit-ibmq-provider
|
03322e8df52217ddb91c96f437dbeecebc4564ee
|
[
"Apache-2.0"
] | null | null | null |
qiskit/providers/ibmq/exceptions.py
|
delapuente/qiskit-ibmq-provider
|
03322e8df52217ddb91c96f437dbeecebc4564ee
|
[
"Apache-2.0"
] | null | null | null |
qiskit/providers/ibmq/exceptions.py
|
delapuente/qiskit-ibmq-provider
|
03322e8df52217ddb91c96f437dbeecebc4564ee
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Exception for the IBMQ module."""
from qiskit.exceptions import QiskitError
class IBMQError(QiskitError):
"""Base class for errors raised by the IBMQ provider module."""
pass
class IBMQAccountError(IBMQError):
"""Base class for errors raised by account management."""
pass
class IBMQBackendError(IBMQError):
"""IBM Q Backend Errors"""
pass
class IBMQBackendValueError(IBMQError, ValueError):
"""Value errors thrown within IBMQBackend """
pass
| 22
| 77
| 0.714076
|
from qiskit.exceptions import QiskitError
class IBMQError(QiskitError):
pass
class IBMQAccountError(IBMQError):
pass
class IBMQBackendError(IBMQError):
pass
class IBMQBackendValueError(IBMQError, ValueError):
pass
| true
| true
|
1c49ac44226dae72e7824740af6a64b46bbf9717
| 21,771
|
py
|
Python
|
tests/test_ft_taxii.py
|
zul126/minemeld-core
|
2eb9b9bfd7654aee57aabd5fb280d4e89a438daf
|
[
"Apache-2.0"
] | 147
|
2016-07-22T18:15:49.000Z
|
2022-03-26T23:32:44.000Z
|
tests/test_ft_taxii.py
|
zul126/minemeld-core
|
2eb9b9bfd7654aee57aabd5fb280d4e89a438daf
|
[
"Apache-2.0"
] | 167
|
2016-07-27T07:02:25.000Z
|
2021-12-16T16:26:52.000Z
|
tests/test_ft_taxii.py
|
zul126/minemeld-core
|
2eb9b9bfd7654aee57aabd5fb280d4e89a438daf
|
[
"Apache-2.0"
] | 112
|
2016-07-22T07:14:29.000Z
|
2022-03-24T18:43:12.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FT TAXII tests
Unit tests for minemeld.ft.taxii
"""
import gevent.monkey
gevent.monkey.patch_all(thread=False, select=False)
import unittest
import mock
import redis
import gevent
import greenlet
import time
import xmltodict
import os
import libtaxii.constants
import re
import lz4
import json
import minemeld.ft.taxii
import minemeld.ft
FTNAME = 'testft-%d' % int(time.time())
MYDIR = os.path.dirname(__file__)
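# Minimal stand-in for a libtaxii content block: it carries only the STIX
# XML payload and its content binding, which is all the TaxiiClient code
# under test inspects.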
class MockTaxiiContentBlock(object):
def __init__(self, stix_xml):
class _Binding(object):
def __init__(self, id_):
self.binding_id = id_
self.content = stix_xml
self.content_binding = _Binding(libtaxii.constants.CB_STIX_XML_111)
class MineMeldFTTaxiiTests(unittest.TestCase):
@mock.patch.object(gevent, 'Greenlet')
def test_taxiiclient_parse(self, glet_mock):
config = {
'side_config': 'dummy.yml',
'ca_file': 'dummy.crt'
}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.TaxiiClient(FTNAME, chassis, config)
inputs = []
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
testfiles = os.listdir(MYDIR)
testfiles = filter(
lambda x: x.startswith('test_ft_taxii_stix_package_'),
testfiles
)
for t in testfiles:
with open(os.path.join(MYDIR, t), 'r') as f:
sxml = f.read()
mo = re.match('test_ft_taxii_stix_package_([A-Za-z0-9]+)_([0-9]+)_.*', t)
self.assertNotEqual(mo, None)
type_ = mo.group(1)
num_indicators = int(mo.group(2))
stix_objects = {
'observables': {},
'indicators': {},
'ttps': {}
}
content_blocks = [
MockTaxiiContentBlock(sxml)
]
b._handle_content_blocks(
content_blocks,
stix_objects
)
params = {
'ttps': stix_objects['ttps'],
'observables': stix_objects['observables']
}
indicators = [[iid, iv, params] for iid, iv in stix_objects['indicators'].iteritems()]
for i in indicators:
result = b._process_item(i)
self.assertEqual(len(result), num_indicators)
if type_ != 'any':
for r in result:
self.assertEqual(r[1]['type'], type_)
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_init(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
self.assertEqual(b.name, FTNAME)
self.assertEqual(b.chassis, chassis)
self.assertEqual(b.config, config)
self.assertItemsEqual(b.inputs, [])
self.assertEqual(b.output, None)
self.assertEqual(b.redis_skey, FTNAME)
self.assertEqual(b.redis_skey_chkp, FTNAME+'.chkp')
self.assertEqual(b.redis_skey_value, FTNAME+'.value')
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_ip(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator='1.1.1.1',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
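        # Scan the recorded redis calls for the pipeline hset that stores
        # the STIX bundle; the for/else fails the test if none was issued.
        # The same pattern recurs in the remaining test cases below.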
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.1')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
# CIDR
b.filtered_update(
'a',
indicator='1.1.1.0/24',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/24')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
# fake range
b.filtered_update(
'a',
indicator='1.1.1.1-1.1.1.1',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.1')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
# fake range 2
b.filtered_update(
'a',
indicator='1.1.1.0-1.1.1.31',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/27')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# real range
b.filtered_update(
'a',
indicator='1.1.1.0-1.1.1.33',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators']
cyboxprops = indicator[0]['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/27')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
cyboxprops = indicator[1]['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.32/31')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_domain(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator='example.com',
value={
'type': 'domain',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['value'], 'example.com')
self.assertEqual(cyboxprops['type'], 'FQDN')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_url(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator='www.example.com/admin.php',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], 'www.example.com/admin.php')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_unicode_url(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# unicast
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], u'\u2603.net/p\xe5th')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_overflow(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = b.max_entries
# unicast
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
self.fail(msg='hset found')
self.assertEqual(b.statistics['drop.overflow'], 1)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = b.max_entries - 1
# unicast
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], u'\u2603.net/p\xe5th')
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_hash(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
# __init__ + get chkp + delete chkp
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
# sha1
b.filtered_update(
'a',
indicator='a6a5418b4d67d9f3a33cbf184b25ac7f9fa87d33',
value={
'type': 'sha1',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'a6a5418b4d67d9f3a33cbf184b25ac7f9fa87d33')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'SHA1')
SR_mock.reset_mock()
# md5
b.filtered_update(
'a',
indicator='e23fadd6ceef8c618fc1c65191d846fa',
value={
'type': 'md5',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'e23fadd6ceef8c618fc1c65191d846fa')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'MD5')
SR_mock.reset_mock()
# sha256
b.filtered_update(
'a',
indicator='a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9',
value={
'type': 'sha256',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'SHA256')
SR_mock.reset_mock()
b.stop()
| 32.253333
| 138
| 0.558495
|
import gevent.monkey
gevent.monkey.patch_all(thread=False, select=False)
import unittest
import mock
import redis
import gevent
import greenlet
import time
import xmltodict
import os
import libtaxii.constants
import re
import lz4
import json
import minemeld.ft.taxii
import minemeld.ft
FTNAME = 'testft-%d' % int(time.time())
MYDIR = os.path.dirname(__file__)
class MockTaxiiContentBlock(object):
def __init__(self, stix_xml):
class _Binding(object):
def __init__(self, id_):
self.binding_id = id_
self.content = stix_xml
self.content_binding = _Binding(libtaxii.constants.CB_STIX_XML_111)
class MineMeldFTTaxiiTests(unittest.TestCase):
@mock.patch.object(gevent, 'Greenlet')
def test_taxiiclient_parse(self, glet_mock):
config = {
'side_config': 'dummy.yml',
'ca_file': 'dummy.crt'
}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.TaxiiClient(FTNAME, chassis, config)
inputs = []
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
testfiles = os.listdir(MYDIR)
testfiles = filter(
lambda x: x.startswith('test_ft_taxii_stix_package_'),
testfiles
)
for t in testfiles:
with open(os.path.join(MYDIR, t), 'r') as f:
sxml = f.read()
mo = re.match('test_ft_taxii_stix_package_([A-Za-z0-9]+)_([0-9]+)_.*', t)
self.assertNotEqual(mo, None)
type_ = mo.group(1)
num_indicators = int(mo.group(2))
stix_objects = {
'observables': {},
'indicators': {},
'ttps': {}
}
content_blocks = [
MockTaxiiContentBlock(sxml)
]
b._handle_content_blocks(
content_blocks,
stix_objects
)
params = {
'ttps': stix_objects['ttps'],
'observables': stix_objects['observables']
}
indicators = [[iid, iv, params] for iid, iv in stix_objects['indicators'].iteritems()]
for i in indicators:
result = b._process_item(i)
self.assertEqual(len(result), num_indicators)
if type_ != 'any':
for r in result:
self.assertEqual(r[1]['type'], type_)
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_init(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
self.assertEqual(b.name, FTNAME)
self.assertEqual(b.chassis, chassis)
self.assertEqual(b.config, config)
self.assertItemsEqual(b.inputs, [])
self.assertEqual(b.output, None)
self.assertEqual(b.redis_skey, FTNAME)
self.assertEqual(b.redis_skey_chkp, FTNAME+'.chkp')
self.assertEqual(b.redis_skey_value, FTNAME+'.value')
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_ip(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator='1.1.1.1',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.1')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.filtered_update(
'a',
indicator='1.1.1.0/24',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/24')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.filtered_update(
'a',
indicator='1.1.1.1-1.1.1.1',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.1')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.filtered_update(
'a',
indicator='1.1.1.0-1.1.1.31',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/27')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator='1.1.1.0-1.1.1.33',
value={
'type': 'IPv4',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.uncompress(args[2][3:]))
indicator = stixdict['indicators']
cyboxprops = indicator[0]['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.0/27')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
cyboxprops = indicator[1]['observable']['object']['properties']
self.assertEqual(cyboxprops['address_value'], '1.1.1.32/31')
self.assertEqual(cyboxprops['xsi:type'], 'AddressObjectType')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_domain(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator='example.com',
value={
'type': 'domain',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['value'], 'example.com')
self.assertEqual(cyboxprops['type'], 'FQDN')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_url(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator='www.example.com/admin.php',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], 'www.example.com/admin.php')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_unicode_url(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], u'\u2603.net/p\xe5th')
SR_mock.reset_mock()
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_overflow(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = b.max_entries
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
self.fail(msg='hset found')
self.assertEqual(b.statistics['drop.overflow'], 1)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = b.max_entries - 1
b.filtered_update(
'a',
indicator=u'☃.net/påth',
value={
'type': 'URL',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['type'], 'URL')
self.assertEqual(cyboxprops['value'], u'\u2603.net/p\xe5th')
b.stop()
@mock.patch.object(redis, 'StrictRedis')
@mock.patch.object(gevent, 'Greenlet')
def test_datafeed_update_hash(self, glet_mock, SR_mock):
config = {}
chassis = mock.Mock()
chassis.request_sub_channel.return_value = None
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
chassis.request_rpc_channel.return_value = None
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
b = minemeld.ft.taxii.DataFeed(FTNAME, chassis, config)
inputs = ['a']
output = False
b.connect(inputs, output)
b.mgmtbus_initialize()
b.start()
self.assertEqual(len(SR_mock.mock_calls), 6)
SR_mock.reset_mock()
SR_mock.return_value.zcard.return_value = 1
b.filtered_update(
'a',
indicator='a6a5418b4d67d9f3a33cbf184b25ac7f9fa87d33',
value={
'type': 'sha1',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'a6a5418b4d67d9f3a33cbf184b25ac7f9fa87d33')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'SHA1')
SR_mock.reset_mock()
b.filtered_update(
'a',
indicator='e23fadd6ceef8c618fc1c65191d846fa',
value={
'type': 'md5',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'e23fadd6ceef8c618fc1c65191d846fa')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'MD5')
SR_mock.reset_mock()
b.filtered_update(
'a',
indicator='a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9',
value={
'type': 'sha256',
'confidence': 100,
'share_level': 'green',
'sources': ['test.1']
}
)
for call in SR_mock.mock_calls:
name, args, kwargs = call
if name == '().pipeline().__enter__().hset':
break
else:
self.fail(msg='hset not found')
self.assertEqual(args[2].startswith('lz4'), True)
stixdict = json.loads(lz4.decompress(args[2][3:]))
indicator = stixdict['indicators'][0]
cyboxprops = indicator['observable']['object']['properties']
self.assertEqual(cyboxprops['hashes'][0]['simple_hash_value'], 'a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9')
self.assertEqual(cyboxprops['hashes'][0]['type']['value'], 'SHA256')
SR_mock.reset_mock()
b.stop()
| true
| true
|
1c49acb962d8f0d4657693061d94007f4a560623
| 1,487
|
py
|
Python
|
tests/writers/test_boto3_stubs_package.py
|
greut/mypy_boto3_builder
|
e3d7fb4bbfbef72f173414bc6f7f9ed992c58333
|
[
"MIT"
] | null | null | null |
tests/writers/test_boto3_stubs_package.py
|
greut/mypy_boto3_builder
|
e3d7fb4bbfbef72f173414bc6f7f9ed992c58333
|
[
"MIT"
] | null | null | null |
tests/writers/test_boto3_stubs_package.py
|
greut/mypy_boto3_builder
|
e3d7fb4bbfbef72f173414bc6f7f9ed992c58333
|
[
"MIT"
] | null | null | null |
import tempfile
from pathlib import Path
from unittest.mock import MagicMock, patch
from mypy_boto3_builder.writers.boto3_stubs_package import write_boto3_stubs_package
class TestBoto3StubsPackage:
@patch("mypy_boto3_builder.writers.boto3_stubs_package.sort_imports")
@patch("mypy_boto3_builder.writers.boto3_stubs_package.blackify")
@patch("mypy_boto3_builder.writers.boto3_stubs_package.render_jinja2_template")
def test_write_master_package(
self,
render_jinja2_template_mock: MagicMock,
blackify_mock: MagicMock,
sort_imports_mock: MagicMock,
) -> None:
package_mock = MagicMock()
package_mock.name = "package"
package_mock.service_name.module_name = "module"
blackify_mock.return_value = "blackify"
sort_imports_mock.return_value = "sort_imports"
render_jinja2_template_mock.return_value = "render_jinja2_template_mock"
with tempfile.TemporaryDirectory() as output_dir:
output_path = Path(output_dir)
result = write_boto3_stubs_package(package_mock, output_path, True)
assert len(result) == 29
assert result[0].name == "setup.py"
render_jinja2_template_mock.assert_called_with(
Path("boto3-stubs/boto3-stubs/version.py.jinja2"),
package=package_mock,
)
assert len(blackify_mock.mock_calls) == 6
assert len(sort_imports_mock.mock_calls) == 6
| 40.189189
| 84
| 0.705447
|
import tempfile
from pathlib import Path
from unittest.mock import MagicMock, patch
from mypy_boto3_builder.writers.boto3_stubs_package import write_boto3_stubs_package
class TestBoto3StubsPackage:
@patch("mypy_boto3_builder.writers.boto3_stubs_package.sort_imports")
@patch("mypy_boto3_builder.writers.boto3_stubs_package.blackify")
@patch("mypy_boto3_builder.writers.boto3_stubs_package.render_jinja2_template")
def test_write_master_package(
self,
render_jinja2_template_mock: MagicMock,
blackify_mock: MagicMock,
sort_imports_mock: MagicMock,
) -> None:
package_mock = MagicMock()
package_mock.name = "package"
package_mock.service_name.module_name = "module"
blackify_mock.return_value = "blackify"
sort_imports_mock.return_value = "sort_imports"
render_jinja2_template_mock.return_value = "render_jinja2_template_mock"
with tempfile.TemporaryDirectory() as output_dir:
output_path = Path(output_dir)
result = write_boto3_stubs_package(package_mock, output_path, True)
assert len(result) == 29
assert result[0].name == "setup.py"
render_jinja2_template_mock.assert_called_with(
Path("boto3-stubs/boto3-stubs/version.py.jinja2"),
package=package_mock,
)
assert len(blackify_mock.mock_calls) == 6
assert len(sort_imports_mock.mock_calls) == 6
| true
| true
|
1c49acdbc39a4f246a5459a9ce974252a307a5f5
| 3,326
|
py
|
Python
|
spec2nii/nifti_orientation.py
|
NeutralKaon/spec2nii
|
52f0dc42ad176fdbb173ac051803372909e9971c
|
[
"BSD-3-Clause"
] | 5
|
2020-06-24T08:25:51.000Z
|
2021-06-30T16:49:37.000Z
|
spec2nii/nifti_orientation.py
|
NeutralKaon/spec2nii
|
52f0dc42ad176fdbb173ac051803372909e9971c
|
[
"BSD-3-Clause"
] | 15
|
2021-11-15T14:57:24.000Z
|
2022-03-25T10:07:47.000Z
|
spec2nii/nifti_orientation.py
|
NeutralKaon/spec2nii
|
52f0dc42ad176fdbb173ac051803372909e9971c
|
[
"BSD-3-Clause"
] | 4
|
2020-06-30T16:16:31.000Z
|
2021-08-05T19:13:11.000Z
|
import numpy as np
from scipy.spatial.transform import Rotation
class NIFTIOrient:
def __init__(self, affine):
self.Q44 = affine
qb, qc, qd, qx, qy, qz, dx, dy, dz, qfac = nifti_mat44_to_quatern(affine)
self.qb = qb
self.qc = qc
self.qd = qd
self.qx = qx
self.qy = qy
self.qz = qz
self.dx = dx
self.dy = dy
self.dz = dz
self.qfac = qfac
def calc_affine(angles, dimensions, shift):
    """Build a 4x4 affine from Euler angles (in degrees, 'xyz' order),
    voxel dimensions and a translation vector."""
scalingMat = np.diag(dimensions)
rot = Rotation.from_euler('xyz', angles, degrees=True)
m33 = rot.as_matrix() @ scalingMat
m44 = np.zeros((4, 4))
m44[0:3, 0:3] = m33
m44[3, 3] = 1.0
m44[0:3, 3] = shift
return m44
def nifti_mat44_to_quatern(R):
"""4x4 affine to quaternion representation."""
# offset outputs are read out of input matrix
qx = R[0, 3]
qy = R[1, 3]
qz = R[2, 3]
# load 3x3 matrix into local variables
r11 = R[0, 0]
r12 = R[0, 1]
r13 = R[0, 2]
r21 = R[1, 0]
r22 = R[1, 1]
r23 = R[1, 2]
r31 = R[2, 0]
r32 = R[2, 1]
r33 = R[2, 2]
# compute lengths of each column; these determine grid spacings
xd = np.sqrt(r11 * r11 + r21 * r21 + r31 * r31)
yd = np.sqrt(r12 * r12 + r22 * r22 + r32 * r32)
zd = np.sqrt(r13 * r13 + r23 * r23 + r33 * r33)
# if a column length is zero, patch the trouble
if xd == 0.0:
r11 = 1.0
r21 = 0.0
r31 = 0.0
xd = 1.0
if yd == 0.0:
r22 = 1.0
r12 = 0.0
r32 = 0.0
yd = 1.0
if zd == 0.0:
r33 = 1.0
r13 = 0.0
r23 = 0.0
zd = 1.0
# assign the output lengths
dx = xd
dy = yd
dz = zd
# normalize the columns
r11 /= xd
r21 /= xd
r31 /= xd
r12 /= yd
r22 /= yd
r32 /= yd
r13 /= zd
r23 /= zd
r33 /= zd
zd = r11 * r22 * r33\
- r11 * r32 * r23\
- r21 * r12 * r33\
+ r21 * r32 * r13\
+ r31 * r12 * r23\
- r31 * r22 * r13
# zd should be -1 or 1
if zd > 0: # proper
qfac = 1.0
else: # improper ==> flip 3rd column
qfac = -1.0
r13 *= -1.0
r23 *= -1.0
r33 *= -1.0
# now, compute quaternion parameters
a = r11 + r22 + r33 + 1.0
if a > 0.5: # simplest case
a = 0.5 * np.sqrt(a)
b = 0.25 * (r32 - r23) / a
c = 0.25 * (r13 - r31) / a
d = 0.25 * (r21 - r12) / a
else: # trickier case
xd = 1.0 + r11 - (r22 + r33) # 4*b*b
yd = 1.0 + r22 - (r11 + r33) # 4*c*c
zd = 1.0 + r33 - (r11 + r22) # 4*d*d
if xd > 1.0:
b = 0.5 * np.sqrt(xd)
c = 0.25 * (r12 + r21) / b
d = 0.25 * (r13 + r31) / b
a = 0.25 * (r32 - r23) / b
elif yd > 1.0:
c = 0.5 * np.sqrt(yd)
b = 0.25 * (r12 + r21) / c
d = 0.25 * (r23 + r32) / c
a = 0.25 * (r13 - r31) / c
else:
d = 0.5 * np.sqrt(zd)
b = 0.25 * (r13 + r31) / d
c = 0.25 * (r23 + r32) / d
a = 0.25 * (r21 - r12) / d
if a < 0.0:
b = -b
c = -c
d = -d
qb = b
qc = c
qd = d
return qb, qc, qd, qx, qy, qz, dx, dy, dz, qfac
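# Illustrative usage sketch (not part of the original module; the angles,
# voxel sizes and offsets below are made-up values):
#     affine = calc_affine([0.0, 0.0, 90.0], [2.0, 2.0, 3.0], [10.0, -5.0, 7.5])
#     orient = NIFTIOrient(affine)
#     # orient.qb/qc/qd hold the quaternion, orient.dx/dy/dz the grid
#     # spacings, and orient.qfac the handedness flag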
| 23.422535
| 81
| 0.435057
|
import numpy as np
from scipy.spatial.transform import Rotation
class NIFTIOrient:
def __init__(self, affine):
self.Q44 = affine
qb, qc, qd, qx, qy, qz, dx, dy, dz, qfac = nifti_mat44_to_quatern(affine)
self.qb = qb
self.qc = qc
self.qd = qd
self.qx = qx
self.qy = qy
self.qz = qz
self.dx = dx
self.dy = dy
self.dz = dz
self.qfac = qfac
def calc_affine(angles, dimensions, shift):
scalingMat = np.diag(dimensions)
rot = Rotation.from_euler('xyz', angles, degrees=True)
m33 = rot.as_matrix() @ scalingMat
m44 = np.zeros((4, 4))
m44[0:3, 0:3] = m33
m44[3, 3] = 1.0
m44[0:3, 3] = shift
return m44
def nifti_mat44_to_quatern(R):
qx = R[0, 3]
qy = R[1, 3]
qz = R[2, 3]
r11 = R[0, 0]
r12 = R[0, 1]
r13 = R[0, 2]
r21 = R[1, 0]
r22 = R[1, 1]
r23 = R[1, 2]
r31 = R[2, 0]
r32 = R[2, 1]
r33 = R[2, 2]
xd = np.sqrt(r11 * r11 + r21 * r21 + r31 * r31)
yd = np.sqrt(r12 * r12 + r22 * r22 + r32 * r32)
zd = np.sqrt(r13 * r13 + r23 * r23 + r33 * r33)
if xd == 0.0:
r11 = 1.0
r21 = 0.0
r31 = 0.0
xd = 1.0
if yd == 0.0:
r22 = 1.0
r12 = 0.0
r32 = 0.0
yd = 1.0
if zd == 0.0:
r33 = 1.0
r13 = 0.0
r23 = 0.0
zd = 1.0
dx = xd
dy = yd
dz = zd
r11 /= xd
r21 /= xd
r31 /= xd
r12 /= yd
r22 /= yd
r32 /= yd
r13 /= zd
r23 /= zd
r33 /= zd
zd = r11 * r22 * r33\
- r11 * r32 * r23\
- r21 * r12 * r33\
+ r21 * r32 * r13\
+ r31 * r12 * r23\
- r31 * r22 * r13
if zd > 0:
qfac = 1.0
else:
qfac = -1.0
r13 *= -1.0
r23 *= -1.0
r33 *= -1.0
a = r11 + r22 + r33 + 1.0
if a > 0.5:
a = 0.5 * np.sqrt(a)
b = 0.25 * (r32 - r23) / a
c = 0.25 * (r13 - r31) / a
d = 0.25 * (r21 - r12) / a
else:
xd = 1.0 + r11 - (r22 + r33)
yd = 1.0 + r22 - (r11 + r33)
zd = 1.0 + r33 - (r11 + r22)
if xd > 1.0:
b = 0.5 * np.sqrt(xd)
c = 0.25 * (r12 + r21) / b
d = 0.25 * (r13 + r31) / b
a = 0.25 * (r32 - r23) / b
elif yd > 1.0:
c = 0.5 * np.sqrt(yd)
b = 0.25 * (r12 + r21) / c
d = 0.25 * (r23 + r32) / c
a = 0.25 * (r13 - r31) / c
else:
d = 0.5 * np.sqrt(zd)
b = 0.25 * (r13 + r31) / d
c = 0.25 * (r23 + r32) / d
a = 0.25 * (r21 - r12) / d
if a < 0.0:
b = -b
c = -c
d = -d
qb = b
qc = c
qd = d
return qb, qc, qd, qx, qy, qz, dx, dy, dz, qfac
| true
| true
|
1c49ad6c5eb75f92292108b9fb7833bf2d72a793
| 4,623
|
py
|
Python
|
tensornetwork/backends/backend_test.py
|
DavidBraun777/TensorNetwork
|
55942a12a859a8c6f8be473e623dbf0ddfd790b5
|
[
"Apache-2.0"
] | null | null | null |
tensornetwork/backends/backend_test.py
|
DavidBraun777/TensorNetwork
|
55942a12a859a8c6f8be473e623dbf0ddfd790b5
|
[
"Apache-2.0"
] | null | null | null |
tensornetwork/backends/backend_test.py
|
DavidBraun777/TensorNetwork
|
55942a12a859a8c6f8be473e623dbf0ddfd790b5
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for graphmode_tensornetwork."""
import builtins
import sys
import pytest
import numpy as np
def clean_tensornetwork_modules():
for mod in list(sys.modules.keys()):
if mod.startswith('tensornetwork'):
sys.modules.pop(mod, None)
@pytest.fixture(autouse=True)
def clean_backend_import():
#never do this outside testing
clean_tensornetwork_modules()
yield # use as teardown
clean_tensornetwork_modules()
@pytest.fixture
def no_backend_dependency(monkeypatch):
import_orig = builtins.__import__
# pylint: disable=redefined-builtin
def mocked_import(name, globals, locals, fromlist, level):
if name in ['torch', 'tensorflow', 'jax']:
raise ImportError()
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import)
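    # from here on, any `import torch`, `import tensorflow` or `import jax`
    # executed by a test body raises ImportError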
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_pytorch_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.pytorch.pytorch_backend import PyTorchBackend
PyTorchBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_tensorflow_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.tensorflow.tensorflow_backend \
import TensorFlowBackend
TensorFlowBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_jax_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
# pylint: disable=import-outside-toplevel
from tensornetwork.backends.jax.jax_backend import JaxBackend
JaxBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_config_backend_missing_can_import_config():
    #config import must succeed even when no backend is installed
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensornetwork.config
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_import_tensornetwork_without_backends():
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import tensornetwork
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.pytorch.pytorch_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.tensorflow.tensorflow_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.jax.jax_backend
#pylint: disable=import-outside-toplevel
import tensornetwork.backends.numpy.numpy_backend
with pytest.raises(ImportError):
#pylint: disable=import-outside-toplevel
#pylint: disable=unused-variable
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_numpy_network_without_backends():
#pylint: disable=import-outside-toplevel
import tensornetwork
net = tensornetwork.TensorNetwork(backend="numpy")
a = net.add_node(np.ones((10,)))
b = net.add_node(np.ones((10,)))
edge = net.connect(a[0], b[0])
final_node = net.contract(edge)
assert final_node.tensor == np.array(10.)
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import torch
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import tensorflow as tf
with pytest.raises(ImportError):
#pylint: disable=unused-variable
#pylint: disable=import-outside-toplevel
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_network_without_backends_raises_error():
#pylint: disable=import-outside-toplevel
import tensornetwork
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="jax")
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="tensorflow")
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="pytorch")
| 32.787234
| 77
| 0.776119
|
import builtins
import sys
import pytest
import numpy as np
def clean_tensornetwork_modules():
for mod in list(sys.modules.keys()):
if mod.startswith('tensornetwork'):
sys.modules.pop(mod, None)
@pytest.fixture(autouse=True)
def clean_backend_import():
clean_tensornetwork_modules()
yield
clean_tensornetwork_modules()
@pytest.fixture
def no_backend_dependency(monkeypatch):
import_orig = builtins.__import__
def mocked_import(name, globals, locals, fromlist, level):
if name in ['torch', 'tensorflow', 'jax']:
raise ImportError()
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import)
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_pytorch_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
from tensornetwork.backends.pytorch.pytorch_backend import PyTorchBackend
PyTorchBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_tensorflow_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
from tensornetwork.backends.tensorflow.tensorflow_backend \
import TensorFlowBackend
TensorFlowBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_backend_jax_missing_cannot_initialize_backend():
with pytest.raises(ImportError):
from tensornetwork.backends.jax.jax_backend import JaxBackend
JaxBackend()
@pytest.mark.usefixtures('no_backend_dependency')
def test_config_backend_missing_can_import_config():
import tensornetwork.config
with pytest.raises(ImportError):
import torch
with pytest.raises(ImportError):
import tensorflow as tf
with pytest.raises(ImportError):
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_import_tensornetwork_without_backends():
import tensornetwork
import tensornetwork.backends.pytorch.pytorch_backend
import tensornetwork.backends.tensorflow.tensorflow_backend
import tensornetwork.backends.jax.jax_backend
import tensornetwork.backends.numpy.numpy_backend
with pytest.raises(ImportError):
import torch
with pytest.raises(ImportError):
import tensorflow as tf
with pytest.raises(ImportError):
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_numpy_network_without_backends():
import tensornetwork
net = tensornetwork.TensorNetwork(backend="numpy")
a = net.add_node(np.ones((10,)))
b = net.add_node(np.ones((10,)))
edge = net.connect(a[0], b[0])
final_node = net.contract(edge)
assert final_node.tensor == np.array(10.)
with pytest.raises(ImportError):
import torch
with pytest.raises(ImportError):
import tensorflow as tf
with pytest.raises(ImportError):
import jax
@pytest.mark.usefixtures('no_backend_dependency')
def test_basic_network_without_backends_raises_error():
import tensornetwork
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="jax")
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="tensorflow")
with pytest.raises(ImportError):
tensornetwork.TensorNetwork(backend="pytorch")
| true
| true
|
1c49ae8cef2d2a65f702a89e8a58574f58e7c5fc
| 11,824
|
py
|
Python
|
tests/test.py
|
tubaman/pyfacebook
|
6fac843c6c52ed916482c9995b4eaa89631aab3a
|
[
"FSFAP"
] | 1
|
2020-05-19T05:38:35.000Z
|
2020-05-19T05:38:35.000Z
|
tests/test.py
|
douglaswth/pyfacebook
|
4a1427808ba41e33698c0e018d2ed44e7993b1c9
|
[
"FSFAP"
] | null | null | null |
tests/test.py
|
douglaswth/pyfacebook
|
4a1427808ba41e33698c0e018d2ed44e7993b1c9
|
[
"FSFAP"
] | null | null | null |
import unittest
import sys
import os
import facebook
import urllib2
try:
from hashlib import md5
md5_constructor = md5
except ImportError:
import md5
md5_constructor = md5.new
try:
import simplejson
except ImportError:
from django.utils import simplejson
import httplib
from minimock import Mock
my_api_key = "e1e9cfeb5e0d7a52e4fbd5d09e1b873e"
my_secret_key = "1bebae7283f5b79aaf9b851addd55b90"
#'{"error_code":100,\
#"error_msg":"Invalid parameter",\
#"request_args":[{"key":"format","value":"JSON"},\
#{"key":"auth_token","value":"24626e24bb12919f2f142145070542e8"},\
#{"key":"sig","value":"36af2af3b93da784149301e77cb1621a"},\
#{"key":"v","value":"1.0"},\
#{"key":"api_key","value":"e1e9cfeb5e0d7a52e4fbd5d09e1b873e"},\
#{"key":"method","value":"facebook.auth.getSession"}]}'
response_str = '{"stuff":"abcd"}'
class MyUrlOpen:
def __init__(self,*args,**kwargs):
pass
def read(self):
global response_str
return response_str
class pyfacebook_UnitTests(unittest.TestCase):
def setUp(self):
facebook.urllib2.urlopen = Mock('urllib2.urlopen')
facebook.urllib2.urlopen.mock_returns_func = MyUrlOpen
pass
def tearDown(self):
pass
def login(self):
pass
def test1(self):
f = facebook.Facebook(api_key=my_api_key, secret_key=my_secret_key)
f.login = self.login
self.assertEquals(f.api_key,my_api_key)
self.assertEquals(f.secret_key,my_secret_key)
self.assertEquals(f.auth_token,None)
self.assertEquals(f.app_name,None)
self.assertEquals(f.callback_path,None)
self.assertEquals(f.internal,None)
def test2(self):
args = {"arg1":"a","arg2":"b","arg3":"c"}
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
hasher.update("acdnj")
f = facebook.Facebook(api_key="abcdf", secret_key="acdnj")
f.login = self.login
digest = f._hash_args(args)
self.assertEquals(hasher.hexdigest(),digest)
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
hasher.update("klmn")
        # note: the trunk code erroneously calls hash.updated instead of hash.update
digest = f._hash_args(args,secret="klmn")
self.assertEquals(hasher.hexdigest(),digest)
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
f.secret = "klmn"
hasher.update(f.secret)
        # note: the trunk code erroneously calls hash.updated instead of hash.update
digest = f._hash_args(args)
self.assertEquals(hasher.hexdigest(),digest)
def test3(self):
global response_str
response = {'stuff':'abcd'}
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token['stuff']),"abcd")
fb.login()
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
res = fb.auth.getSession()
self.assertEquals(str(res["expires"]),response["expires"])
self.assertEquals(str(res["secret"]),response["secret"])
self.assertEquals(str(res["session_key"]),response["session_key"])
self.assertEquals(str(res["uid"]),response["uid"])
def test4(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_login_url(next="nowhere", popup=True, canvas=True)
self.assertEquals(url,
'http://www.facebook.com/login.php?canvas=1&popup=1&auth_token=abcdef&next=nowhere&v=1.0&api_key=%s'%(my_api_key,))
def test5(self):
class Request:
def __init__(self,post,get,method):
self.POST = post
self.GET = get
self.method = method
req = Request({'fb_sig_in_canvas':1},{},'POST')
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
res = fb.check_session(req)
self.assertFalse(res)
req = Request({'fb_sig':1},{},'POST')
res = fb.check_session(req)
self.assertFalse(res)
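        # check_session succeeds only when the request's 'fb_sig' value equals
        # the _hash_args() digest of the remaining fb_sig_* parameters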
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'joe,mary',
'session_key':'abc',
'user':'bob'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'joe,mary',
'fb_sig_session_key':'abc',
'fb_sig_user':'bob'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'',
'session_key':'abc',
'user':'bob'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'',
'fb_sig_session_key':'abc',
'fb_sig_user':'bob'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'',
'session_key':'abc',
'page_id':'id'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'',
'fb_sig_session_key':'abc',
'fb_sig_page_id':'id'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
def test6(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
# self.failUnlessRaises(RuntimeError,fb._add_session_args)
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
fb.auth.getSession()
args = fb._add_session_args()
def test7(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_authorize_url(next="next",next_cancel="next_cancel")
self.assertEquals(url,
'http://www.facebook.com/authorize.php?api_key=%s&next_cancel=next_cancel&v=1.0&next=next' % (my_api_key,))
def test8(self):
class Request:
def __init__(self,post,get,method):
self.POST = post
self.GET = get
self.method = method
global response_str
response = {"session_key":"abcdef","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
req = Request({},{'installed':1,'fb_page_id':'id','auth_token':'abcdef'},'GET')
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
res = fb.check_session(req)
self.assertTrue(res)
def test9(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_add_url(next="next")
self.assertEquals(url,
'http://www.facebook.com/install.php?api_key=%s&v=1.0&next=next' % (my_api_key,))
def send(self,xml):
self.xml = xml
def test10(self):
import Image
image1 = Image.new("RGB", (400, 300), (255, 255, 255))
filename = "image_file.jpg"
image1.save(filename)
global response_str
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
facebook.httplib.HTTP = Mock('httplib.HTTP')
http_connection = Mock('http_connection')
facebook.httplib.HTTP.mock_returns = http_connection
http_connection.send.mock_returns_func = self.send
def _http_passes():
return [200,]
http_connection.getreply.mock_returns_func = _http_passes
def read():
response = {"stuff":"stuff"}
response_str = simplejson.dumps(response)
return response_str
http_connection.file.read.mock_returns_func = read
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
res = fb.auth.getSession()
result = fb.photos.upload(image=filename,aid="aid",caption="a caption")
self.assertEquals(str(result["stuff"]),"stuff")
os.remove(filename)
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(pyfacebook_UnitTests))
# Execute the test suite
print("Testing Proxy class\n")
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(len(result.errors) + len(result.failures))
| 43.470588
| 142
| 0.505751
|
import unittest
import sys
import os
import facebook
import urllib2
try:
from hashlib import md5
md5_constructor = md5
except ImportError:
import md5
md5_constructor = md5.new
try:
import simplejson
except ImportError:
from django.utils import simplejson
import httplib
from minimock import Mock
my_api_key = "e1e9cfeb5e0d7a52e4fbd5d09e1b873e"
my_secret_key = "1bebae7283f5b79aaf9b851addd55b90"
#"error_msg":"Invalid parameter",\
#"request_args":[{"key":"format","value":"JSON"},\
#{"key":"auth_token","value":"24626e24bb12919f2f142145070542e8"},\
#{"key":"sig","value":"36af2af3b93da784149301e77cb1621a"},\
#{"key":"v","value":"1.0"},\
#{"key":"api_key","value":"e1e9cfeb5e0d7a52e4fbd5d09e1b873e"},\
#{"key":"method","value":"facebook.auth.getSession"}]}'
response_str = '{"stuff":"abcd"}'
class MyUrlOpen:
def __init__(self,*args,**kwargs):
pass
def read(self):
global response_str
return response_str
class pyfacebook_UnitTests(unittest.TestCase):
def setUp(self):
facebook.urllib2.urlopen = Mock('urllib2.urlopen')
facebook.urllib2.urlopen.mock_returns_func = MyUrlOpen
pass
def tearDown(self):
pass
def login(self):
pass
def test1(self):
f = facebook.Facebook(api_key=my_api_key, secret_key=my_secret_key)
f.login = self.login
self.assertEquals(f.api_key,my_api_key)
self.assertEquals(f.secret_key,my_secret_key)
self.assertEquals(f.auth_token,None)
self.assertEquals(f.app_name,None)
self.assertEquals(f.callback_path,None)
self.assertEquals(f.internal,None)
def test2(self):
args = {"arg1":"a","arg2":"b","arg3":"c"}
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
hasher.update("acdnj")
f = facebook.Facebook(api_key="abcdf", secret_key="acdnj")
f.login = self.login
digest = f._hash_args(args)
self.assertEquals(hasher.hexdigest(),digest)
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
hasher.update("klmn")
digest = f._hash_args(args,secret="klmn")
self.assertEquals(hasher.hexdigest(),digest)
hasher = md5_constructor(''.join(['%s=%s' % (x, args[x]) for x in sorted(args.keys())]))
f.secret = "klmn"
hasher.update(f.secret)
digest = f._hash_args(args)
self.assertEquals(hasher.hexdigest(),digest)
def test3(self):
global response_str
response = {'stuff':'abcd'}
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token['stuff']),"abcd")
fb.login()
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
res = fb.auth.getSession()
self.assertEquals(str(res["expires"]),response["expires"])
self.assertEquals(str(res["secret"]),response["secret"])
self.assertEquals(str(res["session_key"]),response["session_key"])
self.assertEquals(str(res["uid"]),response["uid"])
def test4(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_login_url(next="nowhere", popup=True, canvas=True)
self.assertEquals(url,
'http://www.facebook.com/login.php?canvas=1&popup=1&auth_token=abcdef&next=nowhere&v=1.0&api_key=%s'%(my_api_key,))
def test5(self):
class Request:
def __init__(self,post,get,method):
self.POST = post
self.GET = get
self.method = method
req = Request({'fb_sig_in_canvas':1},{},'POST')
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
res = fb.check_session(req)
self.assertFalse(res)
req = Request({'fb_sig':1},{},'POST')
res = fb.check_session(req)
self.assertFalse(res)
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'joe,mary',
'session_key':'abc',
'user':'bob'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'joe,mary',
'fb_sig_session_key':'abc',
'fb_sig_user':'bob'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'',
'session_key':'abc',
'user':'bob'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'',
'fb_sig_session_key':'abc',
'fb_sig_user':'bob'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
req = Request({'fb_sig':fb._hash_args({'in_canvas':'1',
'added':'1',
'expires':'1',
'friends':'',
'session_key':'abc',
'page_id':'id'}),
'fb_sig_in_canvas':'1',
'fb_sig_added':'1',
'fb_sig_expires':'1',
'fb_sig_friends':'',
'fb_sig_session_key':'abc',
'fb_sig_page_id':'id'},
{},'POST')
res = fb.check_session(req)
self.assertTrue(res)
def test6(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
fb.auth.getSession()
args = fb._add_session_args()
def test7(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_authorize_url(next="next",next_cancel="next_cancel")
self.assertEquals(url,
'http://www.facebook.com/authorize.php?api_key=%s&next_cancel=next_cancel&v=1.0&next=next' % (my_api_key,))
def test8(self):
class Request:
def __init__(self,post,get,method):
self.POST = post
self.GET = get
self.method = method
global response_str
response = {"session_key":"abcdef","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
req = Request({},{'installed':1,'fb_page_id':'id','auth_token':'abcdef'},'GET')
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
res = fb.check_session(req)
self.assertTrue(res)
def test9(self):
global response_str
response = 'abcdef'
response_str = simplejson.dumps(response)
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
fb.auth.createToken()
self.assertEquals(str(fb.auth_token),"abcdef")
url = fb.get_add_url(next="next")
self.assertEquals(url,
'http://www.facebook.com/install.php?api_key=%s&v=1.0&next=next' % (my_api_key,))
def send(self,xml):
self.xml = xml
def test10(self):
import Image
image1 = Image.new("RGB", (400, 300), (255, 255, 255))
filename = "image_file.jpg"
image1.save(filename)
global response_str
fb = facebook.Facebook(my_api_key, my_secret_key)
fb.login = self.login
facebook.httplib.HTTP = Mock('httplib.HTTP')
http_connection = Mock('http_connection')
facebook.httplib.HTTP.mock_returns = http_connection
http_connection.send.mock_returns_func = self.send
def _http_passes():
return [200,]
http_connection.getreply.mock_returns_func = _http_passes
def read():
response = {"stuff":"stuff"}
response_str = simplejson.dumps(response)
return response_str
http_connection.file.read.mock_returns_func = read
response = {"session_key":"key","uid":"my_uid","secret":"my_secret","expires":"my_expires"}
response_str = simplejson.dumps(response)
res = fb.auth.getSession()
result = fb.photos.upload(image=filename,aid="aid",caption="a caption")
self.assertEquals(str(result["stuff"]),"stuff")
os.remove(filename)
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(pyfacebook_UnitTests))
print("Testing Proxy class\n")
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(len(result.errors) + len(result.failures))
| true
| true
|
1c49aea1b2fec5e7a7e723bbfc78bae2a63ad735
| 3,912
|
py
|
Python
|
atom/nucleus/python/nucleus_api/models/decision_tree_result_vo.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/models/decision_tree_result_vo.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/models/decision_tree_result_vo.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DecisionTreeResultVO(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'entity_id': 'list[str]',
'entity_type': 'str'
}
attribute_map = {
'entity_id': 'entity_id',
'entity_type': 'entity_type'
}
def __init__(self, entity_id=None, entity_type=None): # noqa: E501
"""DecisionTreeResultVO - a model defined in Swagger""" # noqa: E501
self._entity_id = None
self._entity_type = None
self.discriminator = None
if entity_id is not None:
self.entity_id = entity_id
if entity_type is not None:
self.entity_type = entity_type
@property
def entity_id(self):
"""Gets the entity_id of this DecisionTreeResultVO. # noqa: E501
:return: The entity_id of this DecisionTreeResultVO. # noqa: E501
:rtype: list[str]
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""Sets the entity_id of this DecisionTreeResultVO.
:param entity_id: The entity_id of this DecisionTreeResultVO. # noqa: E501
:type: list[str]
"""
self._entity_id = entity_id
@property
def entity_type(self):
"""Gets the entity_type of this DecisionTreeResultVO. # noqa: E501
:return: The entity_type of this DecisionTreeResultVO. # noqa: E501
:rtype: str
"""
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
"""Sets the entity_type of this DecisionTreeResultVO.
:param entity_type: The entity_type of this DecisionTreeResultVO. # noqa: E501
:type: str
"""
self._entity_type = entity_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DecisionTreeResultVO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DecisionTreeResultVO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
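# Illustrative usage sketch (not part of the generated module; the entity
# values below are made-up):
#     vo = DecisionTreeResultVO(entity_id=['id-1', 'id-2'], entity_type='CLIENT')
#     vo.to_dict()  # -> {'entity_id': ['id-1', 'id-2'], 'entity_type': 'CLIENT'}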
| 27.549296
| 87
| 0.57771
|
import pprint
import re
import six
class DecisionTreeResultVO(object):
swagger_types = {
'entity_id': 'list[str]',
'entity_type': 'str'
}
attribute_map = {
'entity_id': 'entity_id',
'entity_type': 'entity_type'
}
def __init__(self, entity_id=None, entity_type=None):
self._entity_id = None
self._entity_type = None
self.discriminator = None
if entity_id is not None:
self.entity_id = entity_id
if entity_type is not None:
self.entity_type = entity_type
@property
def entity_id(self):
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
self._entity_id = entity_id
@property
def entity_type(self):
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
self._entity_type = entity_type
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DecisionTreeResultVO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, DecisionTreeResultVO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c49af06fb182620ad77a0022f91480b1c789ab3
| 909
|
py
|
Python
|
posts/migrations/0001_initial.py
|
CodeEnvironment/django-rest-framework-deploy-heroku
|
c6ffb20961c193b0f4dc1289de904b5d6750f335
|
[
"MIT"
] | 3
|
2021-04-05T14:02:44.000Z
|
2022-01-25T07:50:20.000Z
|
posts/migrations/0001_initial.py
|
CodeEnvironment/django-rest-framework-deploy-aws
|
d9cf1d016e22b9b5697c769bd094776d25a3f90b
|
[
"MIT"
] | null | null | null |
posts/migrations/0001_initial.py
|
CodeEnvironment/django-rest-framework-deploy-aws
|
d9cf1d016e22b9b5697c769bd094776d25a3f90b
|
[
"MIT"
] | 1
|
2022-01-23T15:09:59.000Z
|
2022-01-23T15:09:59.000Z
|
# Generated by Django 2.2.5 on 2020-08-16 10:43
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post_title', models.CharField(max_length=200)),
('post_body', models.TextField(max_length=1000)),
],
),
migrations.CreateModel(
name='PostsRates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('likes', models.BigIntegerField(default=0)),
('dislikes', models.BigIntegerField(default=0)),
],
),
]
| 29.322581
| 114
| 0.561056
|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post_title', models.CharField(max_length=200)),
('post_body', models.TextField(max_length=1000)),
],
),
migrations.CreateModel(
name='PostsRates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('likes', models.BigIntegerField(default=0)),
('dislikes', models.BigIntegerField(default=0)),
],
),
]
| true
| true
|
1c49af27f673eeb7de969da70743dbbc5e30b487
| 154
|
py
|
Python
|
Python/euclidian.py
|
AbdulConsole/Hacktoberfest2019-2
|
b9619361b6cecf9b3e734972af3b0a03dba98d2e
|
[
"MIT"
] | 1
|
2019-10-28T20:12:23.000Z
|
2019-10-28T20:12:23.000Z
|
Python/euclidian.py
|
AbdulConsole/Hacktoberfest2019-2
|
b9619361b6cecf9b3e734972af3b0a03dba98d2e
|
[
"MIT"
] | null | null | null |
Python/euclidian.py
|
AbdulConsole/Hacktoberfest2019-2
|
b9619361b6cecf9b3e734972af3b0a03dba98d2e
|
[
"MIT"
] | 1
|
2020-10-16T14:10:12.000Z
|
2020-10-16T14:10:12.000Z
|
from math import sqrt
# Euclidean distance between two 2D points x and y
def euclidienne(x, y):
    return sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2)
print(euclidienne((1,3),(2,2)))
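# A possible n-dimensional generalisation of the same formula (illustrative
# sketch, not part of the original file):
def euclidienne_nd(x, y):
    return sqrt(sum((a - b) ** 2 for a, b in zip(x, y)))
print(euclidienne_nd((1, 3), (2, 2)))  # same result as the 2D version above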
| 17.111111
| 46
| 0.62987
|
from math import sqrt
def euclidienne(x, y):
    return sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2)
print(euclidienne((1,3),(2,2)))
| true
| true
|
1c49afe5fad9b3edfdb62cbe3f1abd9199670eec
| 15,586
|
py
|
Python
|
pscript/parser3.py
|
JesusZerpa/pscript
|
5ac86c1b5983b47f3f0554e0801893d284f84b2d
|
[
"BSD-2-Clause"
] | 190
|
2018-02-26T00:19:37.000Z
|
2022-03-29T13:35:33.000Z
|
pscript/parser3.py
|
JesusZerpa/pscript
|
5ac86c1b5983b47f3f0554e0801893d284f84b2d
|
[
"BSD-2-Clause"
] | 53
|
2018-03-21T22:39:46.000Z
|
2022-01-15T05:22:05.000Z
|
pscript/parser3.py
|
JesusZerpa/pscript
|
5ac86c1b5983b47f3f0554e0801893d284f84b2d
|
[
"BSD-2-Clause"
] | 21
|
2018-04-16T21:13:00.000Z
|
2022-02-27T23:28:14.000Z
|
"""
Python Builtins
---------------
Most builtin functions (that make sense in JS) are automatically
translated to JavaScript: isinstance, issubclass, callable, hasattr,
getattr, setattr, delattr, print, len, max, min, chr, ord, dict, list,
tuple, range, pow, sum, round, int, float, str, bool, abs, divmod, all,
any, enumerate, zip, reversed, sorted, filter, map.
Further all methods for list, dict and str are implemented (except str
methods: encode, decode, format_map, isprintable, maketrans).
.. pscript_example::
# "self" is replaced with "this"
self.foo
# Printing just works
print('some test')
print(a, b, c, sep='-')
# Getting the length of a string or array
len(foo)
# Rounding and abs
round(foo) # round to nearest integer
int(foo) # round towards 0 as in Python
abs(foo)
# min and max
min(foo)
min(a, b, c)
max(foo)
max(a, b, c)
# divmod
a, b = divmod(100, 7) # -> 14, 2
# Aggregation
sum(foo)
all(foo)
any(foo)
# Turning things into numbers, bools and strings
str(s)
float(x)
bool(y)
int(z) # this rounds towards zero like in Python
chr(65) # -> 'A'
ord('A') # -> 65
# Turning things into lists and dicts
dict([['foo', 1], ['bar', 2]]) # -> {'foo': 1, 'bar': 2}
list('abc') # -> ['a', 'b', 'c']
dict(other_dict) # make a copy
list(other_list) # make copy
The isinstance function (and friends)
-------------------------------------
The ``isinstance()`` function works for all JS primitive types, but also
for user-defined classes.
.. pscript_example::
# Basic types
isinstance(3, float) # in JS there are no ints
isinstance('', str)
isinstance([], list)
isinstance({}, dict)
isinstance(foo, types.FunctionType)
# Can also use JS strings
isinstance(3, 'number')
isinstance('', 'string')
isinstance([], 'array')
isinstance({}, 'object')
isinstance(foo, 'function')
# You can use it on your own types too ...
isinstance(x, MyClass)
isinstance(x, 'MyClass') # equivalent
isinstance(x, 'Object') # also yields true (subclass of Object)
# issubclass works too
issubclass(Foo, Bar)
# As well as callable
callable(foo)
hasattr, getattr, setattr and delattr
-------------------------------------
.. pscript_example::
a = {'foo': 1, 'bar': 2}
hasattr(a, 'foo') # -> True
hasattr(a, 'fooo') # -> False
hasattr(null, 'foo') # -> False
getattr(a, 'foo') # -> 1
getattr(a, 'fooo') # -> raise AttributeError
getattr(a, 'fooo', 3) # -> 3
getattr(null, 'foo', 3) # -> 3
setattr(a, 'foo', 2)
delattr(a, 'foo')
Creating sequences
------------------
.. pscript_example::
range(10)
range(2, 10, 2)
range(100, 0, -1)
reversed(foo)
sorted(foo)
enumerate(foo)
zip(foo, bar)
filter(func, foo)
map(func, foo)
List methods
------------
.. pscript_example::
# Call a.append() if it exists, otherwise a.push()
a.append(x)
# Similar for remove()
a.remove(x)
Dict methods
------------
.. pscript_example::
a = {'foo': 3}
a['foo']
a.get('foo', 0)
a.get('foo')
a.keys()
Str methods
-----------
.. pscript_example::
"foobar".startswith('foo')
"foobar".replace('foo', 'bar')
"foobar".upper()
Using JS specific functionality
-------------------------------
When writing PScript inside Python modules, we recommend prefixing
references to JS-specific functionality with ``window.``, where
``window`` represents the global JS
namespace. All global JavaScript objects, functions, and variables
automatically become members of the ``window`` object. This helps
make it clear that the functionality is specific to JS, and also
helps static code analysis tools like flake8.
.. pscript_example::
from pscript import window # this is a stub
def foo(a):
return window.Math.cos(a)
Aside from ``window``, ``pscript`` also provides ``undefined``,
``Infinity``, and ``NaN``.
"""
from . import commonast as ast
from . import stdlib
from .parser2 import Parser2, JSError, unify # noqa
from .stubs import RawJS
# This class has several `function_foo()` and `method_bar()` methods
# to implement corresponding functionality. Most of these are
# auto-generated from the stdlib. However, some methods need explicit
# implementation, e.g. to parse keyword arguments, or are inlined rather
# than implemented via the stlib.
#
# Note that when the number of arguments does not match, almost all
# functions raise a compile-time error. The methods, however, will
# bypass the stdlib in this case, because it is assumed that the user
# intended to call a special method on the object.
class Parser3(Parser2):
""" Parser to transcompile Python to JS, allowing more Pythonic
code, like ``self``, ``print()``, ``len()``, list methods, etc.
"""
def function_this_is_js(self, node):
# Note that we handle this_is_js() shortcuts in the if-statement
# directly. This replacement with a string is when this_is_js()
# is used outside an if statement.
if len(node.arg_nodes) != 0:
raise JSError('this_is_js() expects zero arguments.')
return ('"this_is_js()"')
def function_RawJS(self, node):
if len(node.arg_nodes) == 1:
if not isinstance(node.arg_nodes[0], ast.Str):
raise JSError('RawJS needs a verbatim string (use multiple '
'args to bypass PScript\'s RawJS).')
lines = RawJS._str2lines(node.arg_nodes[0].value.strip())
nl = '\n' + (self._indent * 4) * ' '
return nl.join(lines)
else:
return None # maybe RawJS is a thing
## Python builtin functions
def function_isinstance(self, node):
if len(node.arg_nodes) != 2:
raise JSError('isinstance() expects two arguments.')
ob = unify(self.parse(node.arg_nodes[0]))
cls = unify(self.parse(node.arg_nodes[1]))
if cls[0] in '"\'':
cls = cls[1:-1] # remove quotes
BASIC_TYPES = ('number', 'boolean', 'string', 'function', 'array',
'object', 'null', 'undefined')
MAP = {'[int, float]': 'number', '[float, int]': 'number', 'float': 'number',
'str': 'string', 'basestring': 'string', 'string_types': 'string',
'bool': 'boolean',
'FunctionType': 'function', 'types.FunctionType': 'function',
'list': 'array', 'tuple': 'array',
'[list, tuple]': 'array', '[tuple, list]': 'array',
'dict': 'object',
}
cmp = MAP.get(cls, cls)
if cmp == 'array':
return ['Array.isArray(', ob, ')']
elif cmp.lower() in BASIC_TYPES:
# Basic type, use Object.prototype.toString
return ["Object.prototype.toString.call(", ob ,
").slice(8,-1).toLowerCase() === '%s'" % cmp.lower()]
# In http://stackoverflow.com/questions/11108877 the following is
# proposed, which might be better in theory, but is > 50% slower
return ["({}).toString.call(",
ob,
r").match(/\s([a-zA-Z]+)/)[1].toLowerCase() === ",
"'%s'" % cmp.lower()
]
else:
# User defined type, use instanceof
# http://tobyho.com/2011/01/28/checking-types-in-javascript/
cmp = unify(cls)
if cmp[0] == '(':
raise JSError('isinstance() can only compare to simple types')
return ob, " instanceof ", cmp
def function_issubclass(self, node):
# issubclass only needs to work on custom classes
if len(node.arg_nodes) != 2:
raise JSError('issubclass() expects two arguments.')
cls1 = unify(self.parse(node.arg_nodes[0]))
cls2 = unify(self.parse(node.arg_nodes[1]))
if cls2 == 'object':
cls2 = 'Object'
return '(%s.prototype instanceof %s)' % (cls1, cls2)
def function_print(self, node):
# Process keywords
sep, end = '" "', ''
for kw in node.kwarg_nodes:
if kw.name == 'sep':
sep = ''.join(self.parse(kw.value_node))
elif kw.name == 'end':
end = ''.join(self.parse(kw.value_node))
elif kw.name in ('file', 'flush'):
raise JSError('print() file and flush args not supported')
else:
raise JSError('Invalid argument for print(): %r' % kw.name)
# Combine args
args = [unify(self.parse(arg)) for arg in node.arg_nodes]
end = (" + %s" % end) if (args and end and end != '\n') else ''
combiner = ' + %s + ' % sep
args_concat = combiner.join(args) or '""'
return 'console.log(' + args_concat + end + ')'
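        # For example (illustrative): print('a', 3, sep='-') compiles to
        # console.log('a' + '-' + 3)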
def function_len(self, node):
if len(node.arg_nodes) == 1:
return unify(self.parse(node.arg_nodes[0])), '.length'
else:
return None # don't apply this feature
def function_max(self, node):
if len(node.arg_nodes) == 0:
raise JSError('max() needs at least one argument')
elif len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'Math.max.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.arg_nodes])
return 'Math.max(', args, ')'
def function_min(self, node):
if len(node.arg_nodes) == 0:
raise JSError('min() needs at least one argument')
elif len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'Math.min.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.arg_nodes])
return 'Math.min(', args, ')'
def function_callable(self, node):
if len(node.arg_nodes) == 1:
arg = unify(self.parse(node.arg_nodes[0]))
return '(typeof %s === "function")' % arg
else:
            raise JSError('callable() needs exactly one argument')
def function_chr(self, node):
if len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'String.fromCharCode(%s)' % arg
else:
            raise JSError('chr() needs exactly one argument')
def function_ord(self, node):
if len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return '%s.charCodeAt(0)' % arg
else:
            raise JSError('ord() needs exactly one argument')
def function_dict(self, node):
if len(node.arg_nodes) == 0:
kwargs = ['%s:%s' % (arg.name, unify(self.parse(arg.value_node)))
for arg in node.kwarg_nodes]
return '{%s}' % ', '.join(kwargs)
if len(node.arg_nodes) == 1:
return self.use_std_function('dict', node.arg_nodes)
else:
            raise JSError('dict() takes at most one positional argument')
def function_list(self, node):
if len(node.arg_nodes) == 0:
return '[]'
if len(node.arg_nodes) == 1:
return self.use_std_function('list', node.arg_nodes)
else:
            raise JSError('list() takes at most one positional argument')
def function_tuple(self, node):
return self.function_list(node)
def function_range(self, node):
if len(node.arg_nodes) == 1:
args = ast.Num(0), node.arg_nodes[0], ast.Num(1)
return self.use_std_function('range', args)
elif len(node.arg_nodes) == 2:
args = node.arg_nodes[0], node.arg_nodes[1], ast.Num(1)
return self.use_std_function('range', args)
elif len(node.arg_nodes) == 3:
return self.use_std_function('range', node.arg_nodes)
else:
raise JSError('range() needs 1, 2 or 3 arguments')
def function_sorted(self, node):
if len(node.arg_nodes) == 1:
key, reverse = ast.Name('undefined'), ast.NameConstant(False)
for kw in node.kwarg_nodes:
if kw.name == 'key':
key = kw.value_node
elif kw.name == 'reverse':
reverse = kw.value_node
else:
raise JSError('Invalid keyword argument for sorted: %r' % kw.name)
return self.use_std_function('sorted', [node.arg_nodes[0], key, reverse])
else:
raise JSError('sorted() needs one argument')
## Methods of list/dict/str
def method_sort(self, node, base):
if len(node.arg_nodes) == 0: # sorts args are keyword-only
key, reverse = ast.Name('undefined'), ast.NameConstant(False)
for kw in node.kwarg_nodes:
if kw.name == 'key':
key = kw.value_node
elif kw.name == 'reverse':
reverse = kw.value_node
else:
raise JSError('Invalid keyword argument for sort: %r' % kw.name)
return self.use_std_method(base, 'sort', [key, reverse])
def method_format(self, node, base):
if node.kwarg_nodes:
raise JSError('Method format() does not support keyword args.')
return self.use_std_method(base, 'format', node.arg_nodes)
# Add functions and methods to the class, using the stdib functions ...
def make_function(name, nargs, function_deps, method_deps):
def function_X(self, node):
if node.kwarg_nodes:
raise JSError('Function %s does not support keyword args.' % name)
if len(node.arg_nodes) not in nargs:
raise JSError('Function %s needs #args in %r.' % (name, nargs))
for dep in function_deps:
self.use_std_function(dep, [])
for dep in method_deps:
self.use_std_method('x', dep, [])
return self.use_std_function(name, node.arg_nodes)
return function_X
def make_method(name, nargs, function_deps, method_deps):
def method_X(self, node, base):
if node.kwarg_nodes:
raise JSError('Method %s does not support keyword args.' % name)
if len(node.arg_nodes) not in nargs:
return None # call as-is, don't use our variant
for dep in function_deps:
self.use_std_function(dep, [])
for dep in method_deps:
self.use_std_method('x', dep, [])
return self.use_std_method(base, name, node.arg_nodes)
return method_X
for name, code in stdlib.METHODS.items():
nargs, function_deps, method_deps = stdlib.get_std_info(code)
if nargs and not hasattr(Parser3, 'method_' + name):
m = make_method(name, tuple(nargs), function_deps, method_deps)
setattr(Parser3, 'method_' + name, m)
for name, code in stdlib.FUNCTIONS.items():
nargs, function_deps, method_deps = stdlib.get_std_info(code)
if nargs and not hasattr(Parser3, 'function_' + name):
m = make_function(name, tuple(nargs), function_deps, method_deps)
setattr(Parser3, 'function_' + name, m)
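# The two loops above auto-generate the remaining wrappers: e.g. a ``sum``
# entry in stdlib.FUNCTIONS (assuming one exists there) becomes
# Parser3.function_sum unless a hand-written variant is already defined.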
| 33.663067
| 86
| 0.568074
|
from . import commonast as ast
from . import stdlib
from .parser2 import Parser2, JSError, unify
from .stubs import RawJS
class Parser3(Parser2):
def function_this_is_js(self, node):
if len(node.arg_nodes) != 0:
raise JSError('this_is_js() expects zero arguments.')
return ('"this_is_js()"')
def function_RawJS(self, node):
if len(node.arg_nodes) == 1:
if not isinstance(node.arg_nodes[0], ast.Str):
raise JSError('RawJS needs a verbatim string (use multiple '
'args to bypass PScript\'s RawJS).')
lines = RawJS._str2lines(node.arg_nodes[0].value.strip())
nl = '\n' + (self._indent * 4) * ' '
return nl.join(lines)
else:
return None # maybe RawJS is a thing
## Python builtin functions
def function_isinstance(self, node):
if len(node.arg_nodes) != 2:
raise JSError('isinstance() expects two arguments.')
ob = unify(self.parse(node.arg_nodes[0]))
cls = unify(self.parse(node.arg_nodes[1]))
if cls[0] in '"\'':
cls = cls[1:-1] # remove quotes
BASIC_TYPES = ('number', 'boolean', 'string', 'function', 'array',
'object', 'null', 'undefined')
MAP = {'[int, float]': 'number', '[float, int]': 'number', 'float': 'number',
'str': 'string', 'basestring': 'string', 'string_types': 'string',
'bool': 'boolean',
'FunctionType': 'function', 'types.FunctionType': 'function',
'list': 'array', 'tuple': 'array',
'[list, tuple]': 'array', '[tuple, list]': 'array',
'dict': 'object',
}
cmp = MAP.get(cls, cls)
if cmp == 'array':
return ['Array.isArray(', ob, ')']
elif cmp.lower() in BASIC_TYPES:
# Basic type, use Object.prototype.toString
return ["Object.prototype.toString.call(", ob ,
").slice(8,-1).toLowerCase() === '%s'" % cmp.lower()]
# In http://stackoverflow.com/questions/11108877 the following is
# proposed, which might be better in theory, but is > 50% slower
return ["({}).toString.call(",
ob,
r").match(/\s([a-zA-Z]+)/)[1].toLowerCase() === ",
"'%s'" % cmp.lower()
]
else:
# User defined type, use instanceof
# http://tobyho.com/2011/01/28/checking-types-in-javascript/
cmp = unify(cls)
if cmp[0] == '(':
raise JSError('isinstance() can only compare to simple types')
return ob, " instanceof ", cmp
def function_issubclass(self, node):
# issubclass only needs to work on custom classes
if len(node.arg_nodes) != 2:
raise JSError('issubclass() expects two arguments.')
cls1 = unify(self.parse(node.arg_nodes[0]))
cls2 = unify(self.parse(node.arg_nodes[1]))
if cls2 == 'object':
cls2 = 'Object'
return '(%s.prototype instanceof %s)' % (cls1, cls2)
def function_print(self, node):
# Process keywords
sep, end = '" "', ''
for kw in node.kwarg_nodes:
if kw.name == 'sep':
sep = ''.join(self.parse(kw.value_node))
elif kw.name == 'end':
end = ''.join(self.parse(kw.value_node))
elif kw.name in ('file', 'flush'):
raise JSError('print() file and flush args not supported')
else:
raise JSError('Invalid argument for print(): %r' % kw.name)
# Combine args
args = [unify(self.parse(arg)) for arg in node.arg_nodes]
end = (" + %s" % end) if (args and end and end != '\n') else ''
combiner = ' + %s + ' % sep
args_concat = combiner.join(args) or '""'
return 'console.log(' + args_concat + end + ')'
def function_len(self, node):
if len(node.arg_nodes) == 1:
return unify(self.parse(node.arg_nodes[0])), '.length'
else:
return None # don't apply this feature
def function_max(self, node):
if len(node.arg_nodes) == 0:
raise JSError('max() needs at least one argument')
elif len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'Math.max.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.arg_nodes])
return 'Math.max(', args, ')'
def function_min(self, node):
if len(node.arg_nodes) == 0:
raise JSError('min() needs at least one argument')
elif len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'Math.min.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.arg_nodes])
return 'Math.min(', args, ')'
def function_callable(self, node):
if len(node.arg_nodes) == 1:
arg = unify(self.parse(node.arg_nodes[0]))
return '(typeof %s === "function")' % arg
else:
            raise JSError('callable() takes exactly one argument')
def function_chr(self, node):
if len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return 'String.fromCharCode(%s)' % arg
else:
            raise JSError('chr() takes exactly one argument')
def function_ord(self, node):
if len(node.arg_nodes) == 1:
arg = ''.join(self.parse(node.arg_nodes[0]))
return '%s.charCodeAt(0)' % arg
else:
            raise JSError('ord() takes exactly one argument')
def function_dict(self, node):
if len(node.arg_nodes) == 0:
kwargs = ['%s:%s' % (arg.name, unify(self.parse(arg.value_node)))
for arg in node.kwarg_nodes]
return '{%s}' % ', '.join(kwargs)
if len(node.arg_nodes) == 1:
return self.use_std_function('dict', node.arg_nodes)
else:
            raise JSError('dict() takes at most one argument')
def function_list(self, node):
if len(node.arg_nodes) == 0:
return '[]'
if len(node.arg_nodes) == 1:
return self.use_std_function('list', node.arg_nodes)
else:
            raise JSError('list() takes at most one argument')
def function_tuple(self, node):
return self.function_list(node)
def function_range(self, node):
if len(node.arg_nodes) == 1:
args = ast.Num(0), node.arg_nodes[0], ast.Num(1)
return self.use_std_function('range', args)
elif len(node.arg_nodes) == 2:
args = node.arg_nodes[0], node.arg_nodes[1], ast.Num(1)
return self.use_std_function('range', args)
elif len(node.arg_nodes) == 3:
return self.use_std_function('range', node.arg_nodes)
else:
raise JSError('range() needs 1, 2 or 3 arguments')
def function_sorted(self, node):
if len(node.arg_nodes) == 1:
key, reverse = ast.Name('undefined'), ast.NameConstant(False)
for kw in node.kwarg_nodes:
if kw.name == 'key':
key = kw.value_node
elif kw.name == 'reverse':
reverse = kw.value_node
else:
raise JSError('Invalid keyword argument for sorted: %r' % kw.name)
return self.use_std_function('sorted', [node.arg_nodes[0], key, reverse])
else:
raise JSError('sorted() needs one argument')
## Methods of list/dict/str
def method_sort(self, node, base):
        if len(node.arg_nodes) == 0:  # sort's args are keyword-only
key, reverse = ast.Name('undefined'), ast.NameConstant(False)
for kw in node.kwarg_nodes:
if kw.name == 'key':
key = kw.value_node
elif kw.name == 'reverse':
reverse = kw.value_node
else:
raise JSError('Invalid keyword argument for sort: %r' % kw.name)
return self.use_std_method(base, 'sort', [key, reverse])
def method_format(self, node, base):
if node.kwarg_nodes:
raise JSError('Method format() does not support keyword args.')
return self.use_std_method(base, 'format', node.arg_nodes)
# Add functions and methods to the class, using the stdlib functions ...
def make_function(name, nargs, function_deps, method_deps):
def function_X(self, node):
if node.kwarg_nodes:
raise JSError('Function %s does not support keyword args.' % name)
if len(node.arg_nodes) not in nargs:
raise JSError('Function %s needs #args in %r.' % (name, nargs))
for dep in function_deps:
self.use_std_function(dep, [])
for dep in method_deps:
self.use_std_method('x', dep, [])
return self.use_std_function(name, node.arg_nodes)
return function_X
def make_method(name, nargs, function_deps, method_deps):
def method_X(self, node, base):
if node.kwarg_nodes:
raise JSError('Method %s does not support keyword args.' % name)
if len(node.arg_nodes) not in nargs:
return None # call as-is, don't use our variant
for dep in function_deps:
self.use_std_function(dep, [])
for dep in method_deps:
self.use_std_method('x', dep, [])
return self.use_std_method(base, name, node.arg_nodes)
return method_X
for name, code in stdlib.METHODS.items():
nargs, function_deps, method_deps = stdlib.get_std_info(code)
if nargs and not hasattr(Parser3, 'method_' + name):
m = make_method(name, tuple(nargs), function_deps, method_deps)
setattr(Parser3, 'method_' + name, m)
for name, code in stdlib.FUNCTIONS.items():
nargs, function_deps, method_deps = stdlib.get_std_info(code)
if nargs and not hasattr(Parser3, 'function_' + name):
m = make_function(name, tuple(nargs), function_deps, method_deps)
setattr(Parser3, 'function_' + name, m)
| true
| true
|
1c49b1235f8371ebf0a2bfea75889cf5b89b0310
| 977
|
py
|
Python
|
fhir_post_from_directory.py
|
NimbusInformatics/bdcat-fhir-azure-prototype
|
c35184d037423c7bf4e7ccb7c9d2a91a1fc161ca
|
[
"Apache-2.0"
] | null | null | null |
fhir_post_from_directory.py
|
NimbusInformatics/bdcat-fhir-azure-prototype
|
c35184d037423c7bf4e7ccb7c9d2a91a1fc161ca
|
[
"Apache-2.0"
] | null | null | null |
fhir_post_from_directory.py
|
NimbusInformatics/bdcat-fhir-azure-prototype
|
c35184d037423c7bf4e7ccb7c9d2a91a1fc161ca
|
[
"Apache-2.0"
] | 1
|
2020-10-17T20:19:57.000Z
|
2020-10-17T20:19:57.000Z
|
# Given a FHIR server, an auth token, and a directory, this script finds
# the .json files in the directory and posts them to the FHIR server.
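#
# Example invocation (hypothetical server URI, token and directory):
#   python fhir_post_from_directory.py https://example.azurehealthcareapis.com <token> ./fhir_json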
import requests
import sys
import json
import urllib3
from pathlib import Path
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
fhir_server_uri = sys.argv[1]
token = sys.argv[2]
directory_path = sys.argv[3]
headers = {'Authorization' : "Bearer " + token, 'Accept' : 'application/json', 'Content-Type' : 'application/json'}
# List all files in directory using pathlib
basepath = Path(directory_path)
files_in_basepath = (entry for entry in basepath.iterdir() if entry.is_file())
for item in files_in_basepath:
if (item.name.endswith('.json')):
print(directory_path + '/' + item.name)
with open(directory_path + '/' + item.name) as json_file:
json_data = json.load(json_file)
r = requests.post(fhir_server_uri, data=json.dumps(json_data), headers=headers, verify=False)
print(r.json())
| 31.516129
| 115
| 0.741044
|
import requests
import sys
import json
import urllib3
from pathlib import Path
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
fhir_server_uri = sys.argv[1]
token = sys.argv[2]
directory_path = sys.argv[3]
headers = {'Authorization' : "Bearer " + token, 'Accept' : 'application/json', 'Content-Type' : 'application/json'}
basepath = Path(directory_path)
files_in_basepath = (entry for entry in basepath.iterdir() if entry.is_file())
for item in files_in_basepath:
if (item.name.endswith('.json')):
print(directory_path + '/' + item.name)
with open(directory_path + '/' + item.name) as json_file:
json_data = json.load(json_file)
r = requests.post(fhir_server_uri, data=json.dumps(json_data), headers=headers, verify=False)
print(r.json())
| true
| true
|
1c49b15084ebbefe896922b679e5df502b62eff9
| 889
|
py
|
Python
|
tests/bugs/core_1489_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_1489_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_1489_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
#coding:utf-8
#
# id: bugs.core_1489
# title: DATEADD wrong work with NULL arguments
# description:
# tracker_id: CORE-1489
# min_versions: []
# versions: 2.1.0
# qmid: bugs.core_1489
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.1.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """SELECT 1, DATEADD(SECOND, Null, CAST('01.01.2007' AS DATE)) FROM RDB$DATABASE;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
CONSTANT DATEADD
============ ===========
1 <null>
"""
@pytest.mark.version('>=2.1.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| 21.682927
| 97
| 0.656918
|
import pytest
from firebird.qa import db_factory, isql_act, Action
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """SELECT 1, DATEADD(SECOND, Null, CAST('01.01.2007' AS DATE)) FROM RDB$DATABASE;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
CONSTANT DATEADD
============ ===========
1 <null>
"""
@pytest.mark.version('>=2.1.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| true
| true
|
1c49b49b5655f4bd51f7a622c855b147b4dbaa5f
| 6,632
|
py
|
Python
|
phypartspiecharts/phypartspiecharts.py
|
joelnitta/phyloscripts
|
ef308fc45e8aae904bf8e235ec0a9809b588a6ea
|
[
"MIT"
] | null | null | null |
phypartspiecharts/phypartspiecharts.py
|
joelnitta/phyloscripts
|
ef308fc45e8aae904bf8e235ec0a9809b588a6ea
|
[
"MIT"
] | null | null | null |
phypartspiecharts/phypartspiecharts.py
|
joelnitta/phyloscripts
|
ef308fc45e8aae904bf8e235ec0a9809b588a6ea
|
[
"MIT"
] | 2
|
2020-06-08T18:11:32.000Z
|
2021-04-05T13:43:14.000Z
|
#!/usr/bin/env python
helptext= '''
Generate the "Pie Chart" representation of gene tree conflict from Smith et al. 2015 from
the output of phyparts, the bipartition summary software described in the same paper.
The input files include three files produced by PhyParts, and a file containing a species
tree in Newick format (likely, the tree used for PhyParts). The output is an SVG containing
the phylogeny along with pie charts at each node.
Requirements:
Python 2.7
ete3
matplotlib
'''
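# Example invocation (hypothetical file names; the positional arguments are the
# species tree, the PhyParts output prefix, and the total number of gene trees):
#   python phypartspiecharts.py species.tre phyparts_out 300 --svg_name pies.svg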
import matplotlib,sys,argparse
from ete3 import Tree, TreeStyle, TextFace,NodeStyle,faces, COLOR_SCHEMES
#Read in species tree and convert to ultrametric
#Match phyparts nodes to ete3 nodes
def get_phyparts_nodes(sptree_fn,phyparts_root):
sptree = Tree(sptree_fn)
sptree.convert_to_ultrametric()
phyparts_node_key = [line for line in open(phyparts_root+".node.key")]
subtrees_dict = {n.split()[0]:Tree(n.split()[1]+";") for n in phyparts_node_key}
subtrees_topids = {}
for x in subtrees_dict:
subtrees_topids[x] = subtrees_dict[x].get_topology_id()
#print(subtrees_topids['1'])
#print()
for node in sptree.traverse():
node_topid = node.get_topology_id()
if "Takakia_4343a" in node.get_leaf_names():
print(node_topid)
print(node)
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
node.name = subtree
return sptree,subtrees_dict,subtrees_topids
#Summarize concordance and conflict from Phyparts
def get_concord_and_conflict(phyparts_root,subtrees_dict,subtrees_topids):
with open(phyparts_root + ".concon.tre") as phyparts_trees:
concon_tree = Tree(phyparts_trees.readline())
conflict_tree = Tree(phyparts_trees.readline())
concord_dict = {}
conflict_dict = {}
for node in concon_tree.traverse():
node_topid = node.get_topology_id()
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
concord_dict[subtree] = node.support
for node in conflict_tree.traverse():
node_topid = node.get_topology_id()
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
conflict_dict[subtree] = node.support
return concord_dict, conflict_dict
#Generate Pie Chart data
def get_pie_chart_data(phyparts_root,total_genes,concord_dict,conflict_dict):
phyparts_hist = [line for line in open(phyparts_root + ".hist")]
phyparts_pies = {}
phyparts_dict = {}
for n in phyparts_hist:
n = n.split(",")
tot_genes = float(n.pop(-1))
node_name = n.pop(0)[4:]
        concord = float(n.pop(0))  # histogram column, superseded by the .concon.tre value below
        concord = concord_dict[node_name]
all_conflict = conflict_dict[node_name]
if len(n) > 0:
most_conflict = max([float(x) for x in n])
else:
most_conflict = 0.0
adj_concord = (concord/total_genes) * 100
adj_most_conflict = (most_conflict/total_genes) * 100
other_conflict = (all_conflict - most_conflict) / total_genes * 100
the_rest = (total_genes - concord - all_conflict) / total_genes * 100
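        # four slices, as percentages of all gene trees: concordant, the single
        # most common conflicting topology, all other conflict, and no signal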
pie_list = [adj_concord,adj_most_conflict,other_conflict,the_rest]
phyparts_pies[node_name] = pie_list
phyparts_dict[node_name] = [int(round(concord,0)),int(round(tot_genes-concord,0))]
return phyparts_dict, phyparts_pies
def node_text_layout(mynode):
F = faces.TextFace(mynode.name,fsize=20)
faces.add_face_to_node(F,mynode,0,position="branch-right")
parser = argparse.ArgumentParser(description=helptext,formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('species_tree',help="Newick formatted species tree topology.")
parser.add_argument('phyparts_root',help="File root name used for Phyparts.")
parser.add_argument('num_genes',type=int,default=0,help="Number of total gene trees. Used to properly scale pie charts.")
parser.add_argument('--taxon_subst',help="Comma-delimted file to translate tip names.")
parser.add_argument("--svg_name",help="File name for SVG generated by script",default="pies.svg")
parser.add_argument("--show_nodes",help="Also show tree with nodes labeled same as PhyParts",action="store_true",default=False)
parser.add_argument("--colors",help="Four colors of the pie chart: concordance (blue) top conflict (green), other conflict (red), no signal (gray)",nargs="+",default=["blue","green","red","dark gray"])
parser.add_argument("--no_ladderize",help="Do not ladderize the input species tree.",action="store_true",default=False)
args = parser.parse_args()
if args.no_ladderize:
ladderize=False
else:
ladderize=True
plot_tree,subtrees_dict,subtrees_topids = get_phyparts_nodes(args.species_tree, args.phyparts_root)
#print(subtrees_dict)
concord_dict, conflict_dict = get_concord_and_conflict(args.phyparts_root,subtrees_dict,subtrees_topids)
phyparts_dist, phyparts_pies = get_pie_chart_data(args.phyparts_root,args.num_genes,concord_dict,conflict_dict)
if args.taxon_subst:
taxon_subst = {line.split(",")[0]:line.split(",")[1] for line in open(args.taxon_subst,'U')}
for leaf in plot_tree.get_leaves():
try:
leaf.name = taxon_subst[leaf.name]
except KeyError:
print(leaf.name)
continue
def phyparts_pie_layout(mynode):
if mynode.name in phyparts_pies:
pie= faces.PieChartFace(phyparts_pies[mynode.name],
#colors=COLOR_SCHEMES["set1"],
colors = args.colors,
width=50, height=50)
pie.border.width = None
pie.opacity = 1
faces.add_face_to_node(pie,mynode, 0, position="branch-right")
concord_text = faces.TextFace(str(int(concord_dict[mynode.name]))+' ',fsize=20)
conflict_text = faces.TextFace(str(int(conflict_dict[mynode.name]))+' ',fsize=20)
faces.add_face_to_node(concord_text,mynode,0,position = "branch-top")
faces.add_face_to_node(conflict_text,mynode,0,position="branch-bottom")
else:
F = faces.TextFace(mynode.name,fsize=20)
faces.add_face_to_node(F,mynode,0,position="aligned")
#Plot Pie Chart
ts = TreeStyle()
ts.show_leaf_name = False
ts.layout_fn = phyparts_pie_layout
nstyle = NodeStyle()
nstyle["size"] = 0
for n in plot_tree.traverse():
n.set_style(nstyle)
n.img_style["vt_line_width"] = 0
ts.draw_guiding_lines = True
ts.guiding_lines_color = "black"
ts.guiding_lines_type = 0
ts.scale = 30
ts.branch_vertical_margin = 10
plot_tree.convert_to_ultrametric()
if ladderize:
plot_tree.ladderize(direction=1)
my_svg = plot_tree.render(args.svg_name,tree_style=ts,w=595,dpi=300)
if args.show_nodes:
node_style = TreeStyle()
node_style.show_leaf_name=False
node_style.layout_fn = node_text_layout
plot_tree.render("tree_nodes.pdf",tree_style=node_style)
| 35.465241
| 202
| 0.740199
|
helptext= '''
Generate the "Pie Chart" representation of gene tree conflict from Smith et al. 2015 from
the output of phyparts, the bipartition summary software described in the same paper.
The input files include three files produced by PhyParts, and a file containing a species
tree in Newick format (likely, the tree used for PhyParts). The output is an SVG containing
the phylogeny along with pie charts at each node.
Requirements:
Python 2.7
ete3
matplotlib
'''
import matplotlib,sys,argparse
from ete3 import Tree, TreeStyle, TextFace,NodeStyle,faces, COLOR_SCHEMES
def get_phyparts_nodes(sptree_fn,phyparts_root):
sptree = Tree(sptree_fn)
sptree.convert_to_ultrametric()
phyparts_node_key = [line for line in open(phyparts_root+".node.key")]
subtrees_dict = {n.split()[0]:Tree(n.split()[1]+";") for n in phyparts_node_key}
subtrees_topids = {}
for x in subtrees_dict:
subtrees_topids[x] = subtrees_dict[x].get_topology_id()
for node in sptree.traverse():
node_topid = node.get_topology_id()
if "Takakia_4343a" in node.get_leaf_names():
print(node_topid)
print(node)
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
node.name = subtree
return sptree,subtrees_dict,subtrees_topids
def get_concord_and_conflict(phyparts_root,subtrees_dict,subtrees_topids):
with open(phyparts_root + ".concon.tre") as phyparts_trees:
concon_tree = Tree(phyparts_trees.readline())
conflict_tree = Tree(phyparts_trees.readline())
concord_dict = {}
conflict_dict = {}
for node in concon_tree.traverse():
node_topid = node.get_topology_id()
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
concord_dict[subtree] = node.support
for node in conflict_tree.traverse():
node_topid = node.get_topology_id()
for subtree in subtrees_dict:
if node_topid == subtrees_topids[subtree]:
conflict_dict[subtree] = node.support
return concord_dict, conflict_dict
def get_pie_chart_data(phyparts_root,total_genes,concord_dict,conflict_dict):
phyparts_hist = [line for line in open(phyparts_root + ".hist")]
phyparts_pies = {}
phyparts_dict = {}
for n in phyparts_hist:
n = n.split(",")
tot_genes = float(n.pop(-1))
node_name = n.pop(0)[4:]
concord = float(n.pop(0))
concord = concord_dict[node_name]
all_conflict = conflict_dict[node_name]
if len(n) > 0:
most_conflict = max([float(x) for x in n])
else:
most_conflict = 0.0
adj_concord = (concord/total_genes) * 100
adj_most_conflict = (most_conflict/total_genes) * 100
other_conflict = (all_conflict - most_conflict) / total_genes * 100
the_rest = (total_genes - concord - all_conflict) / total_genes * 100
pie_list = [adj_concord,adj_most_conflict,other_conflict,the_rest]
phyparts_pies[node_name] = pie_list
phyparts_dict[node_name] = [int(round(concord,0)),int(round(tot_genes-concord,0))]
return phyparts_dict, phyparts_pies
def node_text_layout(mynode):
F = faces.TextFace(mynode.name,fsize=20)
faces.add_face_to_node(F,mynode,0,position="branch-right")
parser = argparse.ArgumentParser(description=helptext,formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('species_tree',help="Newick formatted species tree topology.")
parser.add_argument('phyparts_root',help="File root name used for Phyparts.")
parser.add_argument('num_genes',type=int,default=0,help="Number of total gene trees. Used to properly scale pie charts.")
parser.add_argument('--taxon_subst',help="Comma-delimted file to translate tip names.")
parser.add_argument("--svg_name",help="File name for SVG generated by script",default="pies.svg")
parser.add_argument("--show_nodes",help="Also show tree with nodes labeled same as PhyParts",action="store_true",default=False)
parser.add_argument("--colors",help="Four colors of the pie chart: concordance (blue) top conflict (green), other conflict (red), no signal (gray)",nargs="+",default=["blue","green","red","dark gray"])
parser.add_argument("--no_ladderize",help="Do not ladderize the input species tree.",action="store_true",default=False)
args = parser.parse_args()
if args.no_ladderize:
ladderize=False
else:
ladderize=True
plot_tree,subtrees_dict,subtrees_topids = get_phyparts_nodes(args.species_tree, args.phyparts_root)
concord_dict, conflict_dict = get_concord_and_conflict(args.phyparts_root,subtrees_dict,subtrees_topids)
phyparts_dist, phyparts_pies = get_pie_chart_data(args.phyparts_root,args.num_genes,concord_dict,conflict_dict)
if args.taxon_subst:
taxon_subst = {line.split(",")[0]:line.split(",")[1] for line in open(args.taxon_subst,'U')}
for leaf in plot_tree.get_leaves():
try:
leaf.name = taxon_subst[leaf.name]
except KeyError:
print(leaf.name)
continue
def phyparts_pie_layout(mynode):
if mynode.name in phyparts_pies:
pie= faces.PieChartFace(phyparts_pies[mynode.name],
colors = args.colors,
width=50, height=50)
pie.border.width = None
pie.opacity = 1
faces.add_face_to_node(pie,mynode, 0, position="branch-right")
concord_text = faces.TextFace(str(int(concord_dict[mynode.name]))+' ',fsize=20)
conflict_text = faces.TextFace(str(int(conflict_dict[mynode.name]))+' ',fsize=20)
faces.add_face_to_node(concord_text,mynode,0,position = "branch-top")
faces.add_face_to_node(conflict_text,mynode,0,position="branch-bottom")
else:
F = faces.TextFace(mynode.name,fsize=20)
faces.add_face_to_node(F,mynode,0,position="aligned")
ts = TreeStyle()
ts.show_leaf_name = False
ts.layout_fn = phyparts_pie_layout
nstyle = NodeStyle()
nstyle["size"] = 0
for n in plot_tree.traverse():
n.set_style(nstyle)
n.img_style["vt_line_width"] = 0
ts.draw_guiding_lines = True
ts.guiding_lines_color = "black"
ts.guiding_lines_type = 0
ts.scale = 30
ts.branch_vertical_margin = 10
plot_tree.convert_to_ultrametric()
if ladderize:
plot_tree.ladderize(direction=1)
my_svg = plot_tree.render(args.svg_name,tree_style=ts,w=595,dpi=300)
if args.show_nodes:
node_style = TreeStyle()
node_style.show_leaf_name=False
node_style.layout_fn = node_text_layout
plot_tree.render("tree_nodes.pdf",tree_style=node_style)
| true
| true
|
1c49b4d84b1d5b5b155c9774146d459ed14b8043
| 408
|
py
|
Python
|
examples/use_cases/case4_show_commands/send_command.py
|
johnbarneta/netmiko
|
331187987526f0f784bdf28c85c5256c480d955e
|
[
"MIT"
] | 2
|
2019-07-23T02:27:19.000Z
|
2019-07-23T02:27:25.000Z
|
examples/use_cases/case4_show_commands/send_command.py
|
johnbarneta/netmiko
|
331187987526f0f784bdf28c85c5256c480d955e
|
[
"MIT"
] | 4
|
2020-03-21T22:58:35.000Z
|
2020-03-25T12:11:26.000Z
|
examples/use_cases/case4_show_commands/send_command.py
|
johnbarneta/netmiko
|
331187987526f0f784bdf28c85c5256c480d955e
|
[
"MIT"
] | 1
|
2019-10-16T19:02:32.000Z
|
2019-10-16T19:02:32.000Z
|
#!/usr/bin/env python
from netmiko import Netmiko
from getpass import getpass
cisco1 = {
"host": "cisco1.twb-tech.com",
"username": "pyclass",
"password": getpass(),
"device_type": "cisco_ios",
}
net_connect = Netmiko(**cisco1)
command = "show ip int brief"
print()
print(net_connect.find_prompt())
output = net_connect.send_command(command)
net_connect.disconnect()
print(output)
print()
| 19.428571
| 42
| 0.708333
|
from netmiko import Netmiko
from getpass import getpass
cisco1 = {
"host": "cisco1.twb-tech.com",
"username": "pyclass",
"password": getpass(),
"device_type": "cisco_ios",
}
net_connect = Netmiko(**cisco1)
command = "show ip int brief"
print()
print(net_connect.find_prompt())
output = net_connect.send_command(command)
net_connect.disconnect()
print(output)
print()
| true
| true
|
1c49b4ed9d05d014fcba032d54090b1151a01932
| 2,265
|
py
|
Python
|
physics3d/character_controller.py
|
B3CTOR/runner-ursina-engine
|
59ce82d1107420f17e3129cbe00ddbbd7047f68b
|
[
"MIT"
] | null | null | null |
physics3d/character_controller.py
|
B3CTOR/runner-ursina-engine
|
59ce82d1107420f17e3129cbe00ddbbd7047f68b
|
[
"MIT"
] | null | null | null |
physics3d/character_controller.py
|
B3CTOR/runner-ursina-engine
|
59ce82d1107420f17e3129cbe00ddbbd7047f68b
|
[
"MIT"
] | null | null | null |
from ursina import Entity, Vec3, application
from panda3d.bullet import BulletWorld, BulletCapsuleShape, BulletCharacterControllerNode
class CharacterController(BulletCharacterControllerNode):
def __init__(self, world:BulletWorld, entity:Entity, radius=1, height=2, name='Player', **opts) -> None:
super().__init__(BulletCapsuleShape(radius/2, height/2, 1), radius/2, name)
self.np = application.base.render.attachNewNode(self)
if entity.parent:
self.np.reparent_to(entity.parent)
        # Vec3 components can never be None, so the old `if None in rotation`
        # branch was dead code; copy the entity's current orientation directly.
        self.np.setHpr(entity.getHpr())
self.np.setPos(entity.x, entity.y, entity.z)
entity.reparent_to(self.np)
world.attachCharacter(self)
self.__fall_speed = None
self.__jump_speed = None
self.__max_jump_height = None
for x in opts:
setattr(self, x, opts[x])
def jump(self):
self.doJump()
def move(self, vel:Vec3, is_local:bool):
self.setLinearMovement(vel, is_local)
def rotate(self, omega:float):
self.setAngularMovement(omega)
@property
def can_jump(self):
return self.canJump()
@property
def fall_speed(self):
return self.__fall_speed
@fall_speed.setter
def fall_speed(self, speed:float):
self.__fall_speed = speed
self.setFallSpeed(speed)
@property
    def gravity(self):
        # `return self.gravity` recursed into this property forever; delegate
        # to the Bullet node's getter instead.
        return self.getGravity()
@gravity.setter
def gravity(self, grav:float):
self.setGravity(grav)
@property
def jump_speed(self):
return self.__jump_speed
@jump_speed.setter
def jump_speed(self, speed:float):
self.__jump_speed = speed
self.setJumpSpeed(speed)
@property
def max_jump_height(self):
return self.__max_jump_height
@max_jump_height.setter
def max_jump_height(self, max_jump_height:float):
self.__max_jump_height = max_jump_height
self.setMaxJumpHeight(max_jump_height)
@property
def on_ground(self):
return self.isOnGround()
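# Minimal usage sketch (hypothetical scene; assumes a running ursina app with a
# BulletWorld and an Entity to wrap):
#   world = BulletWorld()
#   player = CharacterController(world, Entity(model='cube'), radius=1, height=2,
#                                gravity=30, jump_speed=8)
#   player.move(Vec3(0, 0, 5), True)   # walk in local space
#   if player.can_jump:
#       player.jump()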
| 27.621951
| 108
| 0.628256
|
from ursina import Entity, Vec3, application
from panda3d.bullet import BulletWorld, BulletCapsuleShape, BulletCharacterControllerNode
class CharacterController(BulletCharacterControllerNode):
def __init__(self, world:BulletWorld, entity:Entity, radius=1, height=2, name='Player', **opts) -> None:
super().__init__(BulletCapsuleShape(radius/2, height/2, 1), radius/2, name)
self.np = application.base.render.attachNewNode(self)
if entity.parent:
self.np.reparent_to(entity.parent)
        self.np.setHpr(entity.getHpr())
self.np.setPos(entity.x, entity.y, entity.z)
entity.reparent_to(self.np)
world.attachCharacter(self)
self.__fall_speed = None
self.__jump_speed = None
self.__max_jump_height = None
for x in opts:
setattr(self, x, opts[x])
def jump(self):
self.doJump()
def move(self, vel:Vec3, is_local:bool):
self.setLinearMovement(vel, is_local)
def rotate(self, omega:float):
self.setAngularMovement(omega)
@property
def can_jump(self):
return self.canJump()
@property
def fall_speed(self):
return self.__fall_speed
@fall_speed.setter
def fall_speed(self, speed:float):
self.__fall_speed = speed
self.setFallSpeed(speed)
@property
    def gravity(self):
        return self.getGravity()
@gravity.setter
def gravity(self, grav:float):
self.setGravity(grav)
@property
def jump_speed(self):
return self.__jump_speed
@jump_speed.setter
def jump_speed(self, speed:float):
self.__jump_speed = speed
self.setJumpSpeed(speed)
@property
def max_jump_height(self):
return self.__max_jump_height
@max_jump_height.setter
def max_jump_height(self, max_jump_height:float):
self.__max_jump_height = max_jump_height
self.setMaxJumpHeight(max_jump_height)
@property
def on_ground(self):
return self.isOnGround()
| true
| true
|
1c49b5f49bd919e8e9e35d1e105629de687aa9d2
| 2,770
|
py
|
Python
|
nasa/nasa_response_handler.py
|
mariosyb/pub_twitter_apod_bot
|
a0da8ae049cee5cb7df2d702e750615e332a9668
|
[
"Apache-2.0"
] | null | null | null |
nasa/nasa_response_handler.py
|
mariosyb/pub_twitter_apod_bot
|
a0da8ae049cee5cb7df2d702e750615e332a9668
|
[
"Apache-2.0"
] | null | null | null |
nasa/nasa_response_handler.py
|
mariosyb/pub_twitter_apod_bot
|
a0da8ae049cee5cb7df2d702e750615e332a9668
|
[
"Apache-2.0"
] | null | null | null |
import enum
from datetime import date, timedelta
COPYRIGHT_PARAMETER = 'copyright' # this will be in response if the image is not public domain
TITLE_PARAMETER = 'title'
EXPLANATION_PARAMETER = 'explanation'
MEDIA_TYPE_PARAMETER = 'media_type'
HD_URL_PARAMETER = 'hdurl'
DATE_FORMAT = '%Y-%m-%d'
class NasaMediaType(enum.Enum):
image = 'IMAGE'
video = 'VIDEO'
def getImageUrl(response):
"""gets photo url
Args:
response (Dict): Custom response from NASA APOD API
Returns:
String: image url
"""
url = response['response_data_raw'][HD_URL_PARAMETER]
return url
def getImageExplanation(response):
"""gets photo explanation
Args:
response (Dict): Custom response from NASA APOD API
Returns:
String: image explanation
"""
explanation = response['response_data_raw'][EXPLANATION_PARAMETER]
return explanation
def getImageTitle(response):
"""gets image title
Args:
response (Dict): Custom response from NASA APOD API
Returns:
String: image title
"""
return response['response_data_raw'][TITLE_PARAMETER]
def getImageCopyright(response):
"""checks if the image has copyright or not
Args:
response (Dict): Custom response from NASA APOD API
Returns:
String: copyright value or 'public domain' if the image has no copyright
"""
copyright = None
if COPYRIGHT_PARAMETER in response['response_data_raw']:
copyright = response['response_data_raw'][COPYRIGHT_PARAMETER]
else:
copyright = 'public domain'
return copyright
def validateMedia(mediaType, response):
"""validates media type os NASA response
Args:
mediaType (NasaMediaType): desire media type from enum
response (Dict): cusntom service response
Returns:
Boolean: True if media type in resonse is the same passed as argument False otherwise
"""
isMediaType = None
responseMediaType = response['response_data_raw'][MEDIA_TYPE_PARAMETER]
if 'IMAGE' == mediaType:
isMediaType = NasaMediaType.image.value == responseMediaType.upper()
elif 'VIDEO' == mediaType:
isMediaType = NasaMediaType.video.value == responseMediaType.upper()
else:
        print(f'ERROR: unsupported media type: {mediaType}')
return
return isMediaType
def subtractDaysFromCurrentDate(days):
"""subtracts a quantity of days from today's date
Args:
days (integer): number of days to subtract
Returns:
        String: the resulting date formatted as YYYY-MM-DD
"""
today = date.today()
subtractedDate = today - timedelta(days=days)
strFormattedDate = subtractedDate.strftime(DATE_FORMAT)
return strFormattedDate
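# Minimal usage sketch (hypothetical response shape, matching the
# 'response_data_raw' wrapper these helpers expect):
#   response = {'response_data_raw': {'title': 'M31', 'media_type': 'image',
#                                     'hdurl': 'https://example.com/m31.jpg',
#                                     'explanation': '...'}}
#   if validateMedia('IMAGE', response):
#       print(getImageTitle(response), getImageUrl(response))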
| 23.87931
| 95
| 0.685199
|
import enum
from datetime import date, timedelta
COPYRIGHT_PARAMETER = 'copyright'
TITLE_PARAMETER = 'title'
EXPLANATION_PARAMETER = 'explanation'
MEDIA_TYPE_PARAMETER = 'media_type'
HD_URL_PARAMETER = 'hdurl'
DATE_FORMAT = '%Y-%m-%d'
class NasaMediaType(enum.Enum):
image = 'IMAGE'
video = 'VIDEO'
def getImageUrl(response):
url = response['response_data_raw'][HD_URL_PARAMETER]
return url
def getImageExplanation(response):
explanation = response['response_data_raw'][EXPLANATION_PARAMETER]
return explanation
def getImageTitle(response):
return response['response_data_raw'][TITLE_PARAMETER]
def getImageCopyright(response):
copyright = None
if COPYRIGHT_PARAMETER in response['response_data_raw']:
copyright = response['response_data_raw'][COPYRIGHT_PARAMETER]
else:
copyright = 'public domain'
return copyright
def validateMedia(mediaType, response):
isMediaType = None
responseMediaType = response['response_data_raw'][MEDIA_TYPE_PARAMETER]
if 'IMAGE' == mediaType:
isMediaType = NasaMediaType.image.value == responseMediaType.upper()
elif 'VIDEO' == mediaType:
isMediaType = NasaMediaType.video.value == responseMediaType.upper()
else:
        print(f'ERROR: unsupported media type: {mediaType}')
return
return isMediaType
def subtractDaysFromCurrentDate(days):
today = date.today()
subtractedDate = today - timedelta(days=days)
strFormattedDate = subtractedDate.strftime(DATE_FORMAT)
return strFormattedDate
| true
| true
|
1c49b6165bb06b0ed8202ea757ce392b9cc7274e
| 896
|
py
|
Python
|
tests/bcbio/test_bcbio.py
|
parlundin/scilifelab
|
e5f4be45e2e9ff6c0756be46ad34dfb7d20a4b4a
|
[
"MIT"
] | 1
|
2016-03-21T14:04:09.000Z
|
2016-03-21T14:04:09.000Z
|
tests/bcbio/test_bcbio.py
|
parlundin/scilifelab
|
e5f4be45e2e9ff6c0756be46ad34dfb7d20a4b4a
|
[
"MIT"
] | 35
|
2015-01-22T08:25:02.000Z
|
2020-02-17T12:09:12.000Z
|
tests/bcbio/test_bcbio.py
|
parlundin/scilifelab
|
e5f4be45e2e9ff6c0756be46ad34dfb7d20a4b4a
|
[
"MIT"
] | 6
|
2015-01-16T15:32:08.000Z
|
2020-01-30T14:34:40.000Z
|
import os
import tempfile
import shutil
import unittest
from ..data import data_files
from scilifelab.bcbio.qc import RunInfoParser
filedir = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
RunInfo = data_files["RunInfo.xml"]
class TestBcbioQC(unittest.TestCase):
"""Test for bcbio qc module"""
def setUp(self):
self.rootdir = tempfile.mkdtemp(prefix="test_bcbio_qc_")
def tearDown(self):
shutil.rmtree(self.rootdir)
def test_parse_runinfo(self):
temp = tempfile.TemporaryFile(mode="w+t")
temp.write(RunInfo)
temp.seek(0)
rip = RunInfoParser()
res = rip.parse(temp)
self.assertEqual(res["Id"], "120924_SN0002_0003_CC003CCCXX")
self.assertEqual(res["Flowcell"], "CC003CCCXX")
self.assertEqual(res["Instrument"], "SN0002")
self.assertEqual(res["Date"], "120924")
| 28.903226
| 70
| 0.670759
|
import os
import tempfile
import shutil
import unittest
from ..data import data_files
from scilifelab.bcbio.qc import RunInfoParser
filedir = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
RunInfo = data_files["RunInfo.xml"]
class TestBcbioQC(unittest.TestCase):
def setUp(self):
self.rootdir = tempfile.mkdtemp(prefix="test_bcbio_qc_")
def tearDown(self):
shutil.rmtree(self.rootdir)
def test_parse_runinfo(self):
temp = tempfile.TemporaryFile(mode="w+t")
temp.write(RunInfo)
temp.seek(0)
rip = RunInfoParser()
res = rip.parse(temp)
self.assertEqual(res["Id"], "120924_SN0002_0003_CC003CCCXX")
self.assertEqual(res["Flowcell"], "CC003CCCXX")
self.assertEqual(res["Instrument"], "SN0002")
self.assertEqual(res["Date"], "120924")
| true
| true
|
1c49b706b8a5d60732fbf10c9acffef8503ea28d
| 7,072
|
py
|
Python
|
examples/simple_dqn.py
|
ofantomas/rlax
|
7bf3bf13d4496f1b708f4ccb5865215a16c618d6
|
[
"Apache-2.0"
] | null | null | null |
examples/simple_dqn.py
|
ofantomas/rlax
|
7bf3bf13d4496f1b708f4ccb5865215a16c618d6
|
[
"Apache-2.0"
] | null | null | null |
examples/simple_dqn.py
|
ofantomas/rlax
|
7bf3bf13d4496f1b708f4ccb5865215a16c618d6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple double-DQN agent trained to play BSuite's Catch env."""
import collections
import random
from absl import app
from absl import flags
from bsuite.environments import catch
import haiku as hk
from haiku import nets
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
import experiment
Params = collections.namedtuple("Params", "online target")
ActorState = collections.namedtuple("ActorState", "count")
ActorOutput = collections.namedtuple("ActorOutput", "actions q_values")
LearnerState = collections.namedtuple("LearnerState", "count opt_state")
Data = collections.namedtuple("Data", "obs_tm1 a_tm1 r_t discount_t obs_t")
FLAGS = flags.FLAGS
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("train_episodes", 301, "Number of train episodes.")
flags.DEFINE_integer("batch_size", 32, "Size of the training batch")
flags.DEFINE_float("target_period", 50, "How often to update the target net.")
flags.DEFINE_integer("replay_capacity", 2000, "Capacity of the replay buffer.")
flags.DEFINE_integer("hidden_units", 50, "Number of network hidden units.")
flags.DEFINE_float("epsilon_begin", 1., "Initial epsilon-greedy exploration.")
flags.DEFINE_float("epsilon_end", 0.01, "Final epsilon-greedy exploration.")
flags.DEFINE_integer("epsilon_steps", 1000, "Steps over which to anneal eps.")
flags.DEFINE_float("discount_factor", 0.99, "Q-learning discount factor.")
flags.DEFINE_float("learning_rate", 0.005, "Optimizer learning rate.")
flags.DEFINE_integer("eval_episodes", 100, "Number of evaluation episodes.")
flags.DEFINE_integer("evaluate_every", 50,
"Number of episodes between evaluations.")
def build_network(num_actions: int) -> hk.Transformed:
"""Factory for a simple MLP network for approximating Q-values."""
def q(obs):
network = hk.Sequential(
[hk.Flatten(),
nets.MLP([FLAGS.hidden_units, num_actions])])
return network(obs)
return hk.without_apply_rng(hk.transform(q))
class ReplayBuffer(object):
"""A simple Python replay buffer."""
def __init__(self, capacity):
self._prev = None
self._action = None
self._latest = None
self.buffer = collections.deque(maxlen=capacity)
def push(self, env_output, action):
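    # Shift the previous timestep along; once both ends of a transition are
    # available, store (obs_tm1, a_tm1, r_t, discount_t, obs_t) in the deque.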
self._prev = self._latest
self._action = action
self._latest = env_output
if action is not None:
self.buffer.append(
(self._prev.observation, self._action, self._latest.reward,
self._latest.discount, self._latest.observation))
def sample(self, batch_size):
obs_tm1, a_tm1, r_t, discount_t, obs_t = zip(
*random.sample(self.buffer, batch_size))
return (np.stack(obs_tm1), np.asarray(a_tm1), np.asarray(r_t),
np.asarray(discount_t) * FLAGS.discount_factor, np.stack(obs_t))
def is_ready(self, batch_size):
return batch_size <= len(self.buffer)
class DQN:
"""A simple DQN agent."""
def __init__(self, observation_spec, action_spec, epsilon_cfg, target_period,
learning_rate):
self._observation_spec = observation_spec
self._action_spec = action_spec
self._target_period = target_period
# Neural net and optimiser.
self._network = build_network(action_spec.num_values)
self._optimizer = optax.adam(learning_rate)
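    # With power=1. the polynomial schedule anneals epsilon linearly from
    # epsilon_begin to epsilon_end over epsilon_steps actor steps.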
self._epsilon_by_frame = optax.polynomial_schedule(**epsilon_cfg)
# Jitting for speed.
self.actor_step = jax.jit(self.actor_step)
self.learner_step = jax.jit(self.learner_step)
def initial_params(self, key):
sample_input = self._observation_spec.generate_value()
sample_input = jnp.expand_dims(sample_input, 0)
online_params = self._network.init(key, sample_input)
return Params(online_params, online_params)
def initial_actor_state(self):
actor_count = jnp.zeros((), dtype=jnp.float32)
return ActorState(actor_count)
def initial_learner_state(self, params):
learner_count = jnp.zeros((), dtype=jnp.float32)
opt_state = self._optimizer.init(params.online)
return LearnerState(learner_count, opt_state)
def actor_step(self, params, env_output, actor_state, key, evaluation):
obs = jnp.expand_dims(env_output.observation, 0) # add dummy batch
q = self._network.apply(params.online, obs)[0] # remove dummy batch
epsilon = self._epsilon_by_frame(actor_state.count)
train_a = rlax.epsilon_greedy(epsilon).sample(key, q)
eval_a = rlax.greedy().sample(key, q)
a = jax.lax.select(evaluation, eval_a, train_a)
return ActorOutput(actions=a, q_values=q), ActorState(actor_state.count + 1)
def learner_step(self, params, data, learner_state, unused_key):
target_params = rlax.periodic_update(
params.online, params.target, learner_state.count, self._target_period)
dloss_dtheta = jax.grad(self._loss)(params.online, target_params, *data)
updates, opt_state = self._optimizer.update(
dloss_dtheta, learner_state.opt_state)
online_params = optax.apply_updates(params.online, updates)
return (
Params(online_params, target_params),
LearnerState(learner_state.count + 1, opt_state))
def _loss(self, online_params, target_params,
obs_tm1, a_tm1, r_t, discount_t, obs_t):
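    # Double Q-learning: the online network selects the next action
    # (q_t_select) while the target network evaluates it (q_t_val), i.e. the
    # TD target is r_t + discount_t * q_t_val[argmax q_t_select].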
q_tm1 = self._network.apply(online_params, obs_tm1)
q_t_val = self._network.apply(target_params, obs_t)
q_t_select = self._network.apply(online_params, obs_t)
batched_loss = jax.vmap(rlax.double_q_learning)
td_error = batched_loss(q_tm1, a_tm1, r_t, discount_t, q_t_val, q_t_select)
return jnp.mean(rlax.l2_loss(td_error))
def main(unused_arg):
env = catch.Catch(seed=FLAGS.seed)
epsilon_cfg = dict(
init_value=FLAGS.epsilon_begin,
end_value=FLAGS.epsilon_end,
transition_steps=FLAGS.epsilon_steps,
power=1.)
agent = DQN(
observation_spec=env.observation_spec(),
action_spec=env.action_spec(),
epsilon_cfg=epsilon_cfg,
target_period=FLAGS.target_period,
learning_rate=FLAGS.learning_rate,
)
accumulator = ReplayBuffer(FLAGS.replay_capacity)
experiment.run_loop(
agent=agent,
environment=env,
accumulator=accumulator,
seed=FLAGS.seed,
batch_size=FLAGS.batch_size,
train_episodes=FLAGS.train_episodes,
evaluate_every=FLAGS.evaluate_every,
eval_episodes=FLAGS.eval_episodes,
)
if __name__ == "__main__":
app.run(main)
| 38.021505
| 80
| 0.724689
|
import collections
import random
from absl import app
from absl import flags
from bsuite.environments import catch
import haiku as hk
from haiku import nets
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
import experiment
Params = collections.namedtuple("Params", "online target")
ActorState = collections.namedtuple("ActorState", "count")
ActorOutput = collections.namedtuple("ActorOutput", "actions q_values")
LearnerState = collections.namedtuple("LearnerState", "count opt_state")
Data = collections.namedtuple("Data", "obs_tm1 a_tm1 r_t discount_t obs_t")
FLAGS = flags.FLAGS
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("train_episodes", 301, "Number of train episodes.")
flags.DEFINE_integer("batch_size", 32, "Size of the training batch")
flags.DEFINE_float("target_period", 50, "How often to update the target net.")
flags.DEFINE_integer("replay_capacity", 2000, "Capacity of the replay buffer.")
flags.DEFINE_integer("hidden_units", 50, "Number of network hidden units.")
flags.DEFINE_float("epsilon_begin", 1., "Initial epsilon-greedy exploration.")
flags.DEFINE_float("epsilon_end", 0.01, "Final epsilon-greedy exploration.")
flags.DEFINE_integer("epsilon_steps", 1000, "Steps over which to anneal eps.")
flags.DEFINE_float("discount_factor", 0.99, "Q-learning discount factor.")
flags.DEFINE_float("learning_rate", 0.005, "Optimizer learning rate.")
flags.DEFINE_integer("eval_episodes", 100, "Number of evaluation episodes.")
flags.DEFINE_integer("evaluate_every", 50,
"Number of episodes between evaluations.")
def build_network(num_actions: int) -> hk.Transformed:
def q(obs):
network = hk.Sequential(
[hk.Flatten(),
nets.MLP([FLAGS.hidden_units, num_actions])])
return network(obs)
return hk.without_apply_rng(hk.transform(q))
class ReplayBuffer(object):
def __init__(self, capacity):
self._prev = None
self._action = None
self._latest = None
self.buffer = collections.deque(maxlen=capacity)
def push(self, env_output, action):
self._prev = self._latest
self._action = action
self._latest = env_output
if action is not None:
self.buffer.append(
(self._prev.observation, self._action, self._latest.reward,
self._latest.discount, self._latest.observation))
def sample(self, batch_size):
obs_tm1, a_tm1, r_t, discount_t, obs_t = zip(
*random.sample(self.buffer, batch_size))
return (np.stack(obs_tm1), np.asarray(a_tm1), np.asarray(r_t),
np.asarray(discount_t) * FLAGS.discount_factor, np.stack(obs_t))
def is_ready(self, batch_size):
return batch_size <= len(self.buffer)
class DQN:
def __init__(self, observation_spec, action_spec, epsilon_cfg, target_period,
learning_rate):
self._observation_spec = observation_spec
self._action_spec = action_spec
self._target_period = target_period
self._network = build_network(action_spec.num_values)
self._optimizer = optax.adam(learning_rate)
self._epsilon_by_frame = optax.polynomial_schedule(**epsilon_cfg)
self.actor_step = jax.jit(self.actor_step)
self.learner_step = jax.jit(self.learner_step)
def initial_params(self, key):
sample_input = self._observation_spec.generate_value()
sample_input = jnp.expand_dims(sample_input, 0)
online_params = self._network.init(key, sample_input)
return Params(online_params, online_params)
def initial_actor_state(self):
actor_count = jnp.zeros((), dtype=jnp.float32)
return ActorState(actor_count)
def initial_learner_state(self, params):
learner_count = jnp.zeros((), dtype=jnp.float32)
opt_state = self._optimizer.init(params.online)
return LearnerState(learner_count, opt_state)
def actor_step(self, params, env_output, actor_state, key, evaluation):
obs = jnp.expand_dims(env_output.observation, 0)
q = self._network.apply(params.online, obs)[0]
epsilon = self._epsilon_by_frame(actor_state.count)
train_a = rlax.epsilon_greedy(epsilon).sample(key, q)
eval_a = rlax.greedy().sample(key, q)
a = jax.lax.select(evaluation, eval_a, train_a)
return ActorOutput(actions=a, q_values=q), ActorState(actor_state.count + 1)
def learner_step(self, params, data, learner_state, unused_key):
target_params = rlax.periodic_update(
params.online, params.target, learner_state.count, self._target_period)
dloss_dtheta = jax.grad(self._loss)(params.online, target_params, *data)
updates, opt_state = self._optimizer.update(
dloss_dtheta, learner_state.opt_state)
online_params = optax.apply_updates(params.online, updates)
return (
Params(online_params, target_params),
LearnerState(learner_state.count + 1, opt_state))
def _loss(self, online_params, target_params,
obs_tm1, a_tm1, r_t, discount_t, obs_t):
q_tm1 = self._network.apply(online_params, obs_tm1)
q_t_val = self._network.apply(target_params, obs_t)
q_t_select = self._network.apply(online_params, obs_t)
batched_loss = jax.vmap(rlax.double_q_learning)
td_error = batched_loss(q_tm1, a_tm1, r_t, discount_t, q_t_val, q_t_select)
return jnp.mean(rlax.l2_loss(td_error))
def main(unused_arg):
env = catch.Catch(seed=FLAGS.seed)
epsilon_cfg = dict(
init_value=FLAGS.epsilon_begin,
end_value=FLAGS.epsilon_end,
transition_steps=FLAGS.epsilon_steps,
power=1.)
agent = DQN(
observation_spec=env.observation_spec(),
action_spec=env.action_spec(),
epsilon_cfg=epsilon_cfg,
target_period=FLAGS.target_period,
learning_rate=FLAGS.learning_rate,
)
accumulator = ReplayBuffer(FLAGS.replay_capacity)
experiment.run_loop(
agent=agent,
environment=env,
accumulator=accumulator,
seed=FLAGS.seed,
batch_size=FLAGS.batch_size,
train_episodes=FLAGS.train_episodes,
evaluate_every=FLAGS.evaluate_every,
eval_episodes=FLAGS.eval_episodes,
)
if __name__ == "__main__":
app.run(main)
| true
| true
|
1c49b765839e7dc8a74136e35d3e6f215885011c
| 8,060
|
py
|
Python
|
docs/conf.py
|
jonnyguio/tsuru
|
30a8d657fbb05e5b64fe8c7babc67f1e618842bf
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
jonnyguio/tsuru
|
30a8d657fbb05e5b64fe8c7babc67f1e618842bf
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
jonnyguio/tsuru
|
30a8d657fbb05e5b64fe8c7babc67f1e618842bf
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# tsuru documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 8 11:09:54 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.mathjax',
'tsuru_sphinx.handlers',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tsuru'
copyright = u'2017, tsuru authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.5'
# The full version, including alpha/beta/rc tags.
release = '1.5.0-rc11'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'tsuru'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['theme']
# if not os.environ.get('READTHEDOCS', None):
# import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme_ext'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tsurudoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tsuru.tex', u'tsuru Documentation',
u'tsuru', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tsuru', u'tsuru Documentation',
[u'tsuru'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tsuru', u'tsuru Documentation',
u'tsuru', 'tsuru', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# tsuru releases
try:
    import releases
except ImportError:
    pass
else:
    html_context = {
        'releases': releases.RELEASES,
    }
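# A minimal sketch of the companion releases.py module the import above
# expects (an assumption for illustration -- the real module is not part of
# this file, and RELEASES may well carry richer objects than strings):
#
#     RELEASES = ["1.5.0-rc11", "1.4.0", "1.3.0"]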
| 30.881226
| 80
| 0.711911
|
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
extensions = [
'sphinx.ext.mathjax',
'tsuru_sphinx.handlers',
]
source_suffix = '.rst'
master_doc = 'index'
project = u'tsuru'
copyright = u'2017, tsuru authors'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.5'
# The full version, including alpha/beta/rc tags.
release = '1.5.0-rc11'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'tsuru'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['theme']
# if not os.environ.get('READTHEDOCS', None):
# import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme_ext'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tsurudoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tsuru.tex', u'tsuru Documentation',
u'tsuru', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tsuru', u'tsuru Documentation',
[u'tsuru'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tsuru', u'tsuru Documentation',
u'tsuru', 'tsuru', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# tsuru releases
try:
    import releases
except ImportError:
    pass
else:
    html_context = {
        'releases': releases.RELEASES,
    }
| true
| true
|
1c49b884e3cdaad9254dce2a1831a4b6797541bf
| 41
|
py
|
Python
|
tests/__init__.py
|
robert2398/docs_parser
|
8ddb8820ebeb0a5b8da11e81fe7fbfceab71e413
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
robert2398/docs_parser
|
8ddb8820ebeb0a5b8da11e81fe7fbfceab71e413
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
robert2398/docs_parser
|
8ddb8820ebeb0a5b8da11e81fe7fbfceab71e413
|
[
"MIT"
] | null | null | null |
"""Unit test package for docs_parser."""
| 20.5
| 40
| 0.707317
| true
| true
|
|
1c49b978a6af8f17af06e34921de32180b66587b
| 467
|
py
|
Python
|
worlds/migrations/0051_pipeline_logging.py
|
cognitive-space/warpzone
|
06acee2add83cf9ddf981b4e4187dd742e627561
|
[
"MIT"
] | 1
|
2022-02-25T12:04:13.000Z
|
2022-02-25T12:04:13.000Z
|
worlds/migrations/0051_pipeline_logging.py
|
cognitive-space/warpzone
|
06acee2add83cf9ddf981b4e4187dd742e627561
|
[
"MIT"
] | null | null | null |
worlds/migrations/0051_pipeline_logging.py
|
cognitive-space/warpzone
|
06acee2add83cf9ddf981b4e4187dd742e627561
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.10 on 2022-01-17 16:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('worlds', '0050_alter_completedlog_pod'),
]
operations = [
migrations.AddField(
model_name='pipeline',
name='logging',
field=models.CharField(choices=[('kube', 'Kubernetes'), ('shelix', 'Star Helix')], default='kube', max_length=10),
),
]
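# For context, a sketch of the model field this migration adds, assuming a
# Pipeline model roughly like the following (hypothetical -- the project's
# actual models.py is not shown here):
#
#     class Pipeline(models.Model):
#         logging = models.CharField(
#             choices=[("kube", "Kubernetes"), ("shelix", "Star Helix")],
#             default="kube", max_length=10)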
| 24.578947
| 126
| 0.605996
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('worlds', '0050_alter_completedlog_pod'),
]
operations = [
migrations.AddField(
model_name='pipeline',
name='logging',
field=models.CharField(choices=[('kube', 'Kubernetes'), ('shelix', 'Star Helix')], default='kube', max_length=10),
),
]
| true
| true
|
1c49b9af782f450f47a2cc9226a2b3a154659404
| 9,972
|
py
|
Python
|
sigal.conf.py
|
flamableconcrete/DnD-Watercolor-Gallery
|
6e8984ba56d2516ee0e17cbfb415fb2198d608cd
|
[
"MIT"
] | 7
|
2020-10-02T02:47:00.000Z
|
2022-03-06T13:00:48.000Z
|
sigal.conf.py
|
flamableconcrete/DnD-Watercolor-Gallery
|
6e8984ba56d2516ee0e17cbfb415fb2198d608cd
|
[
"MIT"
] | 1
|
2021-11-28T22:06:20.000Z
|
2021-11-28T22:06:20.000Z
|
sigal.conf.py
|
flamableconcrete/DnD-Watercolor-Gallery
|
6e8984ba56d2516ee0e17cbfb415fb2198d608cd
|
[
"MIT"
] | 1
|
2020-10-02T02:47:04.000Z
|
2020-10-02T02:47:04.000Z
|
# All configuration values have a default; values that are commented out serve
# to show the default. Default values are specified when modified in this
# example config file
# Gallery title. Can be set here or as the '--title' option of the `sigal
# build` command, or in the 'index.md' file of the source directory.
# The priority order is: cli option > settings file > index.md file
# title = "Sigal test gallery"
# ---------------------
# General configuration
# ---------------------
# Source directory. Can be set here or as the first argument of the `sigal
# build` command
source = "albums"
# Destination directory. Can be set here or as the second argument of the
# `sigal build` command (default: '_build')
# destination = '_build'
# Theme :
# - colorbox (default), galleria, photoswipe, or the path to a custom theme
# directory
# theme = 'colorbox'
theme = "my-sigal-theme"
# Author. Used in the footer of the pages and in the author meta tag.
# author = ''
# Use originals in gallery (default: False). If True, this will bypass all
# processing steps (resize, auto-orient, recompress, and any plugin-specific
# step).
# Originals will be symlinked if orig_link = True, else they will be copied.
use_orig = True
# ----------------
# Image processing (ignored if use_orig = True)
# ----------------
# Size of resized image (default: (640, 480))
# img_size = (800, 600)
img_size = (1076, 816)
# Output format of images (default: None, i.e. use input format)
# img_format = "JPEG"
# Show a map of the images where possible?
# This option only has an effect on the galleria theme for the while.
# The leaflet_provider setting allow to customize the tile provider (see
# https://github.com/leaflet-extras/leaflet-providers#providers)
# show_map = False
# leaflet_provider = 'OpenStreetMap.Mapnik'
# File extensions that should be treated as images
# img_extensions = ['.jpg', '.jpeg', '.png', '.gif']
# Pilkit processor used to resize the image
# (see http://pilkit.readthedocs.org/en/latest/#processors)
# - ResizeToFit: fit the image within the specified dimensions (default)
# - ResizeToFill: crop the image to the exact specified width and height
# - SmartResize: identical to ResizeToFill, but uses entropy to crop the image
# - None: don't resize
# img_processor = 'ResizeToFit'
# Autorotate images
# Warning: this setting is not compatible with `copy_exif_data` (see below),
# because Sigal can't save the modified Orientation tag (currently Pillow can't
# write EXIF).
# autorotate_images = True
# If True, EXIF data from the original image is copied to the resized image
# copy_exif_data = False
# Python's datetime format string used for the EXIF date formatting
# https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior
# datetime_format = '%c'
# Jpeg options
# jpg_options = {'quality': 85,
# 'optimize': True,
# 'progressive': True}
# --------------------
# Thumbnail generation
# --------------------
# Generate thumbnails
# make_thumbs = True
# Subdirectory of the thumbnails
# thumb_dir = 'thumbnails'
# Prefix and/or suffix for thumbnail filenames (default: '')
# thumb_prefix = ''
# thumb_suffix = '.tn'
# Thumbnail size (default: (200, 150))
# For the galleria theme, use 280 px for the width
# For the colorbox and photoswipe theme, use 200 px for the width
thumb_size = (200, 267)
# Crop the image to fill the box
thumb_fit = False
# When using thumb_fit, specifies what we should crop
# for usage see
# http://pillow.readthedocs.io/en/stable/reference/ImageOps.html#PIL.ImageOps.fit
# thumb_fit_centering = (0.5, 0.5)
# Delay in seconds to avoid black thumbnails in videos with fade-in
# thumb_video_delay = '0'
# Keep original image (default: False)
# keep_orig = True
# Subdirectory for original images
# orig_dir = 'original'
# Use symbolic links instead of copying the original images
# orig_link = False
# Use symbolic links that are relative to the source directory instead of absolute paths
# rel_link = False
# Attribute of Album objects which is used to sort medias (eg 'title'). To sort
# on a metadata key, use 'meta.key'.
# albums_sort_attr = 'name'
# Reverse sort for albums
# albums_sort_reverse = False
# Attribute of Media objects which is used to sort medias. 'date' can be used
# to sort with EXIF dates, and 'meta.key' to sort on a metadata key (which then
# must exist for all images).
# medias_sort_attr = 'filename'
# Reverse sort for medias
# medias_sort_reverse = False
# Filter directories and files.
# The settings take a list of patterns matched with the fnmatch module on the
# path relative to the source directory:
# http://docs.python.org/2/library/fnmatch.html
ignore_directories = []
ignore_files = []
# -------------
# Video options
# -------------
# Video converter binary (can be 'avconv' on certain GNU/Linux distributions)
# video_converter = 'ffmpeg'
# File extensions that should be treated as video files
# video_extensions = ['.mov', '.avi', '.mp4', '.webm', '.ogv', '.3gp']
# Video format
# specify an alternative format, valid are 'webm' (default) and 'mp4'
# video_format = 'webm'
# Webm options
# Options used in ffmpeg to encode the webm video. You may want to read
# http://ffmpeg.org/trac/ffmpeg/wiki/vpxEncodingGuide
# Be aware of the fact these options need to be passed as strings. If you are
# using avconv (for example with Ubuntu), you will need to adapt the settings.
# webm_options = ['-crf', '10', '-b:v', '1.6M',
# '-qmin', '4', '-qmax', '63']
# MP4 options
# Options used to encode the mp4 video. You may want to read
# https://trac.ffmpeg.org/wiki/Encode/H.264
# mp4_options = ['-crf', '23' ]
# Size of resized video (default: (480, 360))
# video_size = (480, 360)
# -------------
# Miscellaneous
# -------------
# Write HTML files. If False, sigal will only process the images.
# write_html = False
# Name of the generated HTML files
# output_filename = 'index.html'
# Add output filename (see above) to the URLs
index_in_url = True
# A list of links (tuples (title, URL))
links = [
(
"Original Reddit Thread (2018)",
"https://www.reddit.com/r/UnearthedArcana/comments/83w44y/42_full_page_watercolor_stains_for_the_homebrewery/",
),
(
"New Reddit Thread (2020)",
"https://www.reddit.com/r/UnearthedArcana/comments/iqpmek/301_full_page_watercolor_stains_for/",
),
("Gmbinder Guide", "https://www.gmbinder.com/share/-L4Yt8ZSxmhwqt--yNRT"),
("Homebrewery Guide", "https://homebrewery.naturalcrit.com/share/SkKsdJmKf"),
(
"Website Source (GitHub)",
"https://github.com/flamableconcrete/DnD-Watercolor-Gallery",
),
]
# Google Analytics tracking code (UA-xxxx-x)
google_analytics = 'UA-187978011-1'
# Google Tag Manager tracking code (GTM-xxxxxx)
# google_tag_manager = ''
# Piwik tracking
# tracker_url must not contain trailing slash.
# Example : {'tracker_url': 'http://stats.domain.com', 'site_id' : 2}
# piwik = {'tracker_url': '', 'site_id' : 0}
# Specify a different locale. If set to '', the default locale is used.
# locale = ''
# Define language used on main <html> tag in templates
# html_language = 'en'
# List of files to copy from the source directory to the destination.
# A symbolic link is used if ``orig_link`` is set to True (see above).
# files_to_copy = (('extra/robots.txt', 'robots.txt'),
# ('extra/favicon.ico', 'favicon.ico'),)
# Colorbox theme config
# The column size is given in number of column of the css grid of the Skeleton
# framework which is used for this theme: http://www.getskeleton.com/#grid
# Then the image size must be adapted to fit the column size.
# The default is 3 columns (176px).
# colorbox_column_size = 3
# Site Logo - Use a logo file in the sidebar
# Only for colorbox currently, it could be adapted for other themes
# You must place the logo file into the theme's static images folder, which
# can be done using 'files_to_copy':
# files_to_copy = (('extra/logo.png', 'static/logo.png'))
# site_logo = 'logo.png'
# --------
# Plugins
# --------
# List of plugins to use. The values must be a path that can be imported.
# Another option is to import the plugin and put the module in the list, but
# this will break with the multiprocessing feature (the settings dict obtained
# from this file must be serializable).
plugins = [
# 'sigal.plugins.adjust',
"sigal.plugins.compress_assets",
# 'sigal.plugins.copyright',
# 'sigal.plugins.encrypt',
# 'sigal.plugins.extended_caching',
# 'sigal.plugins.feeds',
# 'sigal.plugins.media_page',
# 'sigal.plugins.nomedia',
# 'sigal.plugins.upload_s3',
# 'sigal.plugins.watermark',
"sigal.plugins.zip_gallery",
]
# Adjust the image after resizing it. A default value of 1.0 leaves the images
# untouched.
# adjust_options = {'color': 1.0,
# 'brightness': 1.0,
# 'contrast': 1.0,
# 'sharpness': 1.0}
# Settings for compressing static assets
compress_assets_options = {"method": "brotli"}
# Add a copyright text on the image (default: '')
# copyright = "© An example copyright message"
# Settings for encryption plugin
# encrypt_options = {
# 'password': 'password',
# 'ask_password': False
# }
# Settings for upload to s3 plugin
# upload_s3_options = {
# 'bucket': 'my-bucket',
# 'policy': 'public-read',
# 'overwrite': False
# }
# Set zip_gallery to either False or a file name. The file name can
# be formatted python style with an 'album' variable, for example
# '{album.name}.zip'. The final archive will contain all resized or
# original files (depending on `zip_media_format`).
# zip_gallery = False # False or 'archive.zip'
zip_gallery = "{album.name}.zip"
# zip_media_format = 'resized' # 'resized' or 'orig'
# zip_skip_if_exists = False # Skip archive generation if archive is
# already present. Warning: new photos in an album won't be added to archive
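# Illustration of how format strings such as zip_gallery above resolve,
# using a hypothetical Album stand-in (sigal passes its own album object;
# this is only a sketch of the mechanism):
#
#     class Album: name = "my-album"
#     "{album.name}.zip".format(album=Album())  # -> "my-album.zip"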
| 33.019868
| 119
| 0.695347
|
source = "albums"
theme = "my-sigal-theme"
use_orig = True
img_size = (1076, 816)
thumb_size = (200, 267)
thumb_fit = False
ignore_directories = []
ignore_files = []
index_in_url = True
links = [
(
"Original Reddit Thread (2018)",
"https://www.reddit.com/r/UnearthedArcana/comments/83w44y/42_full_page_watercolor_stains_for_the_homebrewery/",
),
(
"New Reddit Thread (2020)",
"https://www.reddit.com/r/UnearthedArcana/comments/iqpmek/301_full_page_watercolor_stains_for/",
),
("Gmbinder Guide", "https://www.gmbinder.com/share/-L4Yt8ZSxmhwqt--yNRT"),
("Homebrewery Guide", "https://homebrewery.naturalcrit.com/share/SkKsdJmKf"),
(
"Website Source (GitHub)",
"https://github.com/flamableconcrete/DnD-Watercolor-Gallery",
),
]
google_analytics = 'UA-187978011-1'
# can be done using 'files_to_copy':
# files_to_copy = (('extra/logo.png', 'static/logo.png'))
# site_logo = 'logo.png'
# --------
# Plugins
# --------
# List of plugins to use. The values must be a path that can be imported.
# Another option is to import the plugin and put the module in the list, but
# this will break with the multiprocessing feature (the settings dict obtained
# from this file must be serializable).
plugins = [
# 'sigal.plugins.adjust',
"sigal.plugins.compress_assets",
# 'sigal.plugins.copyright',
# 'sigal.plugins.encrypt',
# 'sigal.plugins.extended_caching',
# 'sigal.plugins.feeds',
# 'sigal.plugins.media_page',
# 'sigal.plugins.nomedia',
# 'sigal.plugins.upload_s3',
# 'sigal.plugins.watermark',
"sigal.plugins.zip_gallery",
]
# Adjust the image after resizing it. A default value of 1.0 leaves the images
# untouched.
# adjust_options = {'color': 1.0,
# 'brightness': 1.0,
# 'contrast': 1.0,
# 'sharpness': 1.0}
# Settings for compressing static assets
compress_assets_options = {"method": "brotli"}
# Add a copyright text on the image (default: '')
# copyright = "© An example copyright message"
# Settings for encryption plugin
# encrypt_options = {
# 'password': 'password',
# 'ask_password': False
# }
# Settings for upload to s3 plugin
# upload_s3_options = {
# 'bucket': 'my-bucket',
# 'policy': 'public-read',
# 'overwrite': False
# }
# Set zip_gallery to either False or a file name. The file name can
# be formatted python style with an 'album' variable, for example
# '{album.name}.zip'. The final archive will contain all resized or
# original files (depending on `zip_media_format`).
# zip_gallery = False # False or 'archive.zip'
zip_gallery = "{album.name}.zip"
# zip_media_format = 'resized' # 'resized' or 'orig'
# zip_skip_if_exists = False # Skip archive generation if archive is
# already present. Warning: new photos in an album won't be added to archive
| true
| true
|
1c49b9b3ba4bd5a9767f7b527ecc21f6732caf9f
| 687
|
py
|
Python
|
Content/Data Structures/Matrix.py
|
MovsisyanM/Data-Structures-And-Algos-Revisit
|
3bb128a4a5476914c164b1a3c1b533a8eace8604
|
[
"MIT"
] | 3
|
2020-12-24T16:49:14.000Z
|
2021-08-10T17:19:16.000Z
|
Content/Data Structures/Matrix.py
|
MovsisyanM/Data-Structures-And-Algos-Revisit
|
3bb128a4a5476914c164b1a3c1b533a8eace8604
|
[
"MIT"
] | null | null | null |
Content/Data Structures/Matrix.py
|
MovsisyanM/Data-Structures-And-Algos-Revisit
|
3bb128a4a5476914c164b1a3c1b533a8eace8604
|
[
"MIT"
] | 1
|
2020-12-25T15:37:36.000Z
|
2020-12-25T15:37:36.000Z
|
import copy
import math
class Matrix:
    """No, not the movie.
    A 2d array with many methods to make it act like a matrix"""
    def __init__(self, size, fill_with=0):
        assert size >= 1, "Matrix size too small, must be a positive integer"
        self.size = math.floor(size)
        # build each row separately: [[fill_with] * size] * size would alias
        # every row to the same list object
        self.mem = [[fill_with] * self.size for _ in range(self.size)]
    def __getitem__(self, key):
        # return a shallow copy so callers can't mutate the row in place
        return copy.copy(self.mem[key])
    # this is where the fun begins!
    def __mul__(self, matrix):
        # No need to worry about matrix mult. compatibility since all of them
        # are created square
        # TODO
        pass
# arr = np.random.rand(50) * 50
# InsertionSort(arr)
# print(IsSorted(arr))
# Code block by Movsisyan
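# Quick usage sketch (added for illustration, not part of the original
# exercise): shows that rows no longer alias each other after the
# list-comprehension fix above.
if __name__ == "__main__":
    m = Matrix(3)
    m.mem[0][0] = 1     # mutate one cell of the first row...
    print(m.mem[1][0])  # ...the second row still reads 0 (no aliasing)
    print(m[0])         # __getitem__ hands back a shallow copy of the row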
| 25.444444
| 97
| 0.640466
|
import copy
import math
class Matrix:
    def __init__(self, size, fill_with=0):
        assert size >= 1, "Matrix size too small, must be a positive integer"
        self.size = math.floor(size)
        self.mem = [[fill_with] * self.size for _ in range(self.size)]
    def __getitem__(self, key):
        return copy.copy(self.mem[key])
    def __mul__(self, matrix):
        pass
| true
| true
|
1c49ba3af2eaa48f8e0bb0d3f22a566b9e7df7f7
| 1,212
|
py
|
Python
|
tests/test_roi.py
|
EXLER/CILISSA
|
452b9233d0d4a5139b9ab022b9178d9cde832359
|
[
"MIT"
] | null | null | null |
tests/test_roi.py
|
EXLER/CILISSA
|
452b9233d0d4a5139b9ab022b9178d9cde832359
|
[
"MIT"
] | 1
|
2021-10-15T19:55:56.000Z
|
2021-10-15T19:55:56.000Z
|
tests/test_roi.py
|
EXLER/CILISSA
|
452b9233d0d4a5139b9ab022b9178d9cde832359
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from cilissa.images import Image, ImagePair
from cilissa.metrics import MSE
from cilissa.roi import ROI
from cilissa.transformations import Equalization
from tests.base import BaseTest
class TestROI(BaseTest):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.base_image = Image(Path(cls.data_path, "ref_images", "parrots.bmp"))
cls.transformed_image = Image(Path(cls.data_path, "other", "parrots_roi.bmp"))
cls.transformed_roi = ROI(0, 0, 384, 512)
cls.unchanged_roi = ROI(384, 0, 768, 512)
def test_roi_transformation(self) -> None:
result_image = self.base_image.copy()
cropped_image = result_image.crop(self.transformed_roi.slices)
transformed_image = Equalization().transform(cropped_image)
result_image.from_array(transformed_image.im, at=self.transformed_roi.slices)
self.assertEqual(result_image, self.transformed_image)
def test_roi_analysis(self) -> None:
pair = ImagePair(self.base_image, self.transformed_image)
pair.set_roi(self.unchanged_roi)
mse = MSE()
result = mse.analyze(pair)
self.assertEqual(result, 0)
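# The ROI mechanics exercised above reduce to a slice-based region replace.
# A standalone numpy sketch of that pattern (illustrative only -- not
# CILISSA's implementation; the slice order for ROI(0, 0, 384, 512) is an
# assumption):
#
#     import numpy as np
#     im = np.zeros((512, 768, 3), dtype=np.uint8)
#     roi = (slice(0, 512), slice(0, 384))  # rows, then columns
#     region = im[roi]                      # crop the region of interest
#     im[roi] = 255 - region                # write transformed pixels back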
| 32.756757
| 86
| 0.70462
|
from pathlib import Path
from cilissa.images import Image, ImagePair
from cilissa.metrics import MSE
from cilissa.roi import ROI
from cilissa.transformations import Equalization
from tests.base import BaseTest
class TestROI(BaseTest):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.base_image = Image(Path(cls.data_path, "ref_images", "parrots.bmp"))
cls.transformed_image = Image(Path(cls.data_path, "other", "parrots_roi.bmp"))
cls.transformed_roi = ROI(0, 0, 384, 512)
cls.unchanged_roi = ROI(384, 0, 768, 512)
def test_roi_transformation(self) -> None:
result_image = self.base_image.copy()
cropped_image = result_image.crop(self.transformed_roi.slices)
transformed_image = Equalization().transform(cropped_image)
result_image.from_array(transformed_image.im, at=self.transformed_roi.slices)
self.assertEqual(result_image, self.transformed_image)
def test_roi_analysis(self) -> None:
pair = ImagePair(self.base_image, self.transformed_image)
pair.set_roi(self.unchanged_roi)
mse = MSE()
result = mse.analyze(pair)
self.assertEqual(result, 0)
| true
| true
|
1c49bbbc3c51bc3b92d9cf824afce38dc820b85b
| 16,100
|
py
|
Python
|
tests/handlers/test_e2e_room_keys.py
|
cleveritcz/synapse
|
caead3e45968a9f753da7bc11ee588ab4efda858
|
[
"Apache-2.0"
] | 1
|
2019-05-01T11:05:51.000Z
|
2019-05-01T11:05:51.000Z
|
tests/handlers/test_e2e_room_keys.py
|
cleveritcz/synapse
|
caead3e45968a9f753da7bc11ee588ab4efda858
|
[
"Apache-2.0"
] | null | null | null |
tests/handlers/test_e2e_room_keys.py
|
cleveritcz/synapse
|
caead3e45968a9f753da7bc11ee588ab4efda858
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mock
from twisted.internet import defer
import synapse.api.errors
import synapse.handlers.e2e_room_keys
import synapse.storage
from synapse.api import errors
from tests import unittest, utils
# sample room_key data for use in the tests
room_keys = {
"rooms": {
"!abc:matrix.org": {
"sessions": {
"c0ff33": {
"first_message_index": 1,
"forwarded_count": 1,
"is_verified": False,
"session_data": "SSBBTSBBIEZJU0gK"
}
}
}
}
}
class E2eRoomKeysHandlerTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs)
self.hs = None # type: synapse.server.HomeServer
self.handler = None # type: synapse.handlers.e2e_keys.E2eRoomKeysHandler
@defer.inlineCallbacks
def setUp(self):
self.hs = yield utils.setup_test_homeserver(
self.addCleanup,
handlers=None,
replication_layer=mock.Mock(),
)
self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs)
self.local_user = "@boris:" + self.hs.hostname
@defer.inlineCallbacks
def test_get_missing_current_version_info(self):
"""Check that we get a 404 if we ask for info about the current version
if there is no version.
"""
res = None
try:
yield self.handler.get_version_info(self.local_user)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_version_info(self):
"""Check that we get a 404 if we ask for info about a specific version
if it doesn't exist.
"""
res = None
try:
yield self.handler.get_version_info(self.local_user, "bogus_version")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_create_version(self):
"""Check that we can create and then retrieve versions.
"""
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(res, "1")
# check we can retrieve it as the current version
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"version": "1",
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
# check we can retrieve it as a specific version
res = yield self.handler.get_version_info(self.local_user, "1")
self.assertDictEqual(res, {
"version": "1",
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
# upload a new one...
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
self.assertEqual(res, "2")
# check we can retrieve it as the current version
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"version": "2",
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
@defer.inlineCallbacks
def test_update_version(self):
"""Check that we can update versions.
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": version
})
self.assertDictEqual(res, {})
# check we can retrieve it as the current version
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": version
})
@defer.inlineCallbacks
def test_update_missing_version(self):
"""Check that we get a 404 on updating nonexistent versions
"""
res = None
try:
yield self.handler.update_version(self.local_user, "1", {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": "1"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_update_bad_version(self):
"""Check that we get a 400 if the version in the body is missing or
doesn't match
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = None
try:
yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 400)
res = None
try:
yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": "incorrect"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 400)
@defer.inlineCallbacks
def test_delete_missing_version(self):
"""Check that we get a 404 on deleting nonexistent versions
"""
res = None
try:
yield self.handler.delete_version(self.local_user, "1")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_delete_missing_current_version(self):
"""Check that we get a 404 on deleting nonexistent current version
"""
res = None
try:
yield self.handler.delete_version(self.local_user)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_delete_version(self):
"""Check that we can create and then delete versions.
"""
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(res, "1")
# check we can delete it
yield self.handler.delete_version(self.local_user, "1")
# check that it's gone
res = None
try:
yield self.handler.get_version_info(self.local_user, "1")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_backup(self):
"""Check that we get a 404 on querying missing backup
"""
res = None
try:
yield self.handler.get_room_keys(self.local_user, "bogus_version")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_room_keys(self):
"""Check we get an empty response from an empty backup
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertDictEqual(res, {
"rooms": {}
})
# TODO: test the locking semantics when uploading room_keys,
# although this is probably best done in sytest
@defer.inlineCallbacks
def test_upload_room_keys_no_versions(self):
"""Check that we get a 404 on uploading keys when no versions are defined
"""
res = None
try:
yield self.handler.upload_room_keys(self.local_user, "no_version", room_keys)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_upload_room_keys_bogus_version(self):
"""Check that we get a 404 on uploading keys when an nonexistent version
is specified
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = None
try:
yield self.handler.upload_room_keys(
self.local_user, "bogus_version", room_keys
)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_upload_room_keys_wrong_version(self):
"""Check that we get a 403 on uploading keys for an old version
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
self.assertEqual(version, "2")
res = None
try:
yield self.handler.upload_room_keys(self.local_user, "1", room_keys)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 403)
@defer.inlineCallbacks
def test_upload_room_keys_insert(self):
"""Check that we can insert and retrieve keys for a session
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertDictEqual(res, room_keys)
# check getting room_keys for a given room
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org"
)
self.assertDictEqual(res, room_keys)
# check getting room_keys for a given session_id
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, room_keys)
@defer.inlineCallbacks
def test_upload_room_keys_merge(self):
"""Check that we can upload a new room_key for an existing session and
have it correctly merged"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
new_room_keys = copy.deepcopy(room_keys)
new_room_key = new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']
# test that increasing the message_index doesn't replace the existing session
new_room_key['first_message_index'] = 2
new_room_key['session_data'] = 'new'
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"SSBBTSBBIEZJU0gK"
)
# test that marking the session as verified however /does/ replace it
new_room_key['is_verified'] = True
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"new"
)
# test that a session with a higher forwarded_count doesn't replace one
# with a lower forwarding count
new_room_key['forwarded_count'] = 2
new_room_key['session_data'] = 'other'
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"new"
)
# TODO: check edge cases as well as the common variations here
@defer.inlineCallbacks
def test_delete_room_keys(self):
"""Check that we can insert and delete keys for a session
"""
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
# check for bulk-delete
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(self.local_user, version)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
# check for bulk-delete per room
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
# check for bulk-delete per session
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
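# The merge cases above encode a precedence rule for replacing a stored key.
# A standalone sketch of that rule (illustrative only -- not synapse's
# implementation):
#
#     def should_replace(old, new):
#         if new["is_verified"] != old["is_verified"]:
#             return new["is_verified"]  # verified keys always win
#         if new["first_message_index"] != old["first_message_index"]:
#             return new["first_message_index"] < old["first_message_index"]
#         return new["forwarded_count"] < old["forwarded_count"]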
| 34.623656
| 89
| 0.605093
|
import copy
import mock
from twisted.internet import defer
import synapse.api.errors
import synapse.handlers.e2e_room_keys
import synapse.storage
from synapse.api import errors
from tests import unittest, utils
room_keys = {
"rooms": {
"!abc:matrix.org": {
"sessions": {
"c0ff33": {
"first_message_index": 1,
"forwarded_count": 1,
"is_verified": False,
"session_data": "SSBBTSBBIEZJU0gK"
}
}
}
}
}
class E2eRoomKeysHandlerTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs)
self.hs = None
self.handler = None
@defer.inlineCallbacks
def setUp(self):
self.hs = yield utils.setup_test_homeserver(
self.addCleanup,
handlers=None,
replication_layer=mock.Mock(),
)
self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs)
self.local_user = "@boris:" + self.hs.hostname
@defer.inlineCallbacks
def test_get_missing_current_version_info(self):
res = None
try:
yield self.handler.get_version_info(self.local_user)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_version_info(self):
res = None
try:
yield self.handler.get_version_info(self.local_user, "bogus_version")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_create_version(self):
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(res, "1")
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"version": "1",
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
res = yield self.handler.get_version_info(self.local_user, "1")
self.assertDictEqual(res, {
"version": "1",
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
self.assertEqual(res, "2")
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"version": "2",
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
@defer.inlineCallbacks
def test_update_version(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": version
})
self.assertDictEqual(res, {})
res = yield self.handler.get_version_info(self.local_user)
self.assertDictEqual(res, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": version
})
@defer.inlineCallbacks
def test_update_missing_version(self):
res = None
try:
yield self.handler.update_version(self.local_user, "1", {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": "1"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_update_bad_version(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = None
try:
yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 400)
res = None
try:
yield self.handler.update_version(self.local_user, version, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "revised_first_version_auth_data",
"version": "incorrect"
})
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 400)
@defer.inlineCallbacks
def test_delete_missing_version(self):
res = None
try:
yield self.handler.delete_version(self.local_user, "1")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_delete_missing_current_version(self):
res = None
try:
yield self.handler.delete_version(self.local_user)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_delete_version(self):
res = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(res, "1")
yield self.handler.delete_version(self.local_user, "1")
res = None
try:
yield self.handler.get_version_info(self.local_user, "1")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_backup(self):
res = None
try:
yield self.handler.get_room_keys(self.local_user, "bogus_version")
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_get_missing_room_keys(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertDictEqual(res, {
"rooms": {}
})
# TODO: test the locking semantics when uploading room_keys,
# although this is probably best done in sytest
@defer.inlineCallbacks
def test_upload_room_keys_no_versions(self):
res = None
try:
yield self.handler.upload_room_keys(self.local_user, "no_version", room_keys)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_upload_room_keys_bogus_version(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
res = None
try:
yield self.handler.upload_room_keys(
self.local_user, "bogus_version", room_keys
)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 404)
@defer.inlineCallbacks
def test_upload_room_keys_wrong_version(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "second_version_auth_data",
})
self.assertEqual(version, "2")
res = None
try:
yield self.handler.upload_room_keys(self.local_user, "1", room_keys)
except errors.SynapseError as e:
res = e.code
self.assertEqual(res, 403)
@defer.inlineCallbacks
def test_upload_room_keys_insert(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertDictEqual(res, room_keys)
# check getting room_keys for a given room
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org"
)
self.assertDictEqual(res, room_keys)
# check getting room_keys for a given session_id
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, room_keys)
@defer.inlineCallbacks
def test_upload_room_keys_merge(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
new_room_keys = copy.deepcopy(room_keys)
new_room_key = new_room_keys['rooms']['!abc:matrix.org']['sessions']['c0ff33']
# test that increasing the message_index doesn't replace the existing session
new_room_key['first_message_index'] = 2
new_room_key['session_data'] = 'new'
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"SSBBTSBBIEZJU0gK"
)
new_room_key['is_verified'] = True
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"new"
)
# with a lower forwarding count
new_room_key['forwarded_count'] = 2
new_room_key['session_data'] = 'other'
yield self.handler.upload_room_keys(self.local_user, version, new_room_keys)
res = yield self.handler.get_room_keys(self.local_user, version)
self.assertEqual(
res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'],
"new"
)
# TODO: check edge cases as well as the common variations here
@defer.inlineCallbacks
def test_delete_room_keys(self):
version = yield self.handler.create_version(self.local_user, {
"algorithm": "m.megolm_backup.v1",
"auth_data": "first_version_auth_data",
})
self.assertEqual(version, "1")
# check for bulk-delete
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(self.local_user, version)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
# check for bulk-delete per room
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
# check for bulk-delete per session
yield self.handler.upload_room_keys(self.local_user, version, room_keys)
yield self.handler.delete_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
res = yield self.handler.get_room_keys(
self.local_user,
version,
room_id="!abc:matrix.org",
session_id="c0ff33",
)
self.assertDictEqual(res, {
"rooms": {}
})
| true
| true
|
1c49bbdc54235d9fd6a0bca037e561e4f50df361
| 18,581
|
py
|
Python
|
store/models.py
|
olaoluwa-98/tetris
|
2daf4f7dc24c655cadc71394aea2aa68879bf6ea
|
[
"MIT"
] | null | null | null |
store/models.py
|
olaoluwa-98/tetris
|
2daf4f7dc24c655cadc71394aea2aa68879bf6ea
|
[
"MIT"
] | null | null | null |
store/models.py
|
olaoluwa-98/tetris
|
2daf4f7dc24c655cadc71394aea2aa68879bf6ea
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager
from django.contrib.auth import get_user_model
from django.utils.crypto import get_random_string
from .addresses import STATES
from autoslug import AutoSlugField
from django.urls import reverse
from phonenumber_field.modelfields import PhoneNumberField
# this returns the location of the uploaded profile picture
def get_profile_pic_path(instance, filename):
return 'profile_pictures/{}-{}'.format(instance.user.username, filename)
class User(AbstractUser):
email = models.EmailField( verbose_name='email address', unique=True)
email_token = models.CharField(verbose_name='email token', max_length=16, editable=False, null=True)
is_verified = models.BooleanField(default=False)
phone = PhoneNumberField(default='',
help_text='Please use the following format: <em>+234 XXX XXX XXXX</em>.',
)
profile_pic_path = models.ImageField(upload_to=get_profile_pic_path, max_length=255)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
    # Override the model's save() method:
    def save(self, *args, **kwargs):
        # generate an email_token for the user;
        # the email_token must be unique, so retry on collisions
self.email_token = '{}{}'.format(self.email[:2], get_random_string(length=14))
while User.objects.filter(email_token=self.email_token).exists():
self.email_token = '{}{}'.format(self.email[:2], get_random_string(length=14))
super(User, self).save(*args, **kwargs)
def __str__(self):
return '{}'.format(self.email)
class Brand(models.Model):
name = models.CharField(max_length=40, unique=True, verbose_name='name of the brand' )
email = models.EmailField( max_length=50, verbose_name='email address of the brand')
phone = PhoneNumberField(blank=True, null=True,
help_text='Please use the following format: <em>+234 XXX XXX XXXX</em>.',
)
desc = models.CharField(max_length=255, verbose_name='description of brand', blank=True, null=True)
brand_image_url = models.ImageField(upload_to='img/brands/', max_length=255, blank=True, null=True)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date brand was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date brand details were updated last' )
def get_absolute_url(self):
return reverse('store:brand', kwargs={'slug': self.slug})
def get_carts(self):
return Cart.objects.filter(product__brand=self).order_by('-created_at')
def get_wishes(self):
return Wish.objects.filter(product__brand=self).order_by('-created_at')
def get_orders(self):
return OrderItem.objects.filter(product__brand=self).order_by('-created_at')
    def random_product_images(self):
        products = list(self.products.all()[:3])
        images = []
        for product in products:
            # avoid querying .first() twice per product
            first_image = product.product_images.first()
            if first_image:
                images.append(first_image)
        if images:
            return images
        return None
def __str__(self):
return '{}'.format(self.name)
class Meta:
get_latest_by = 'created_at'
class ShippingAddress(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='shipping_addresses',
verbose_name ='Customer'
)
is_default = models.BooleanField(default=False)
zip_code = models.CharField( max_length=10, verbose_name='zip code' )
address = models.CharField( max_length=60, verbose_name='address' )
city = models.CharField( max_length=30, verbose_name='city' )
state = models.CharField( max_length=15, verbose_name='state', choices=STATES )
country = models.CharField( max_length=30, default='Nigeria', verbose_name='country' )
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date added'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date shipping address details were updated last' )
def __str__(self):
return '{}, {}. ({})'.format(self.city, self.state, self.user.username)
class Meta:
verbose_name_plural = 'Shipping Addresses'
get_latest_by = 'created_at'
ordering = ['user_id',]
class ProductCategory(models.Model):
name = models.CharField(max_length=30, unique=True, verbose_name='name of category')
CAT_TYPES = (
('top', 'Top'),
('bottom', 'Bottom'),
('accessory', 'Accessory'),
('foot', 'Footwear'),
('other', 'Other')
)
cat_type = models.CharField(max_length=10, choices=CAT_TYPES, verbose_name='type of category')
desc = models.CharField(max_length=255, verbose_name='description of product category', blank=True, null=True)
cat_image_url = models.ImageField(upload_to='img/product_categories/', max_length=255, blank=True, null=True)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date product category was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date product category details were updated last')
def get_absolute_url(self):
return reverse('store:category', kwargs={'slug': self.slug})
    def random_product_images(self):
        products = list(self.products.all()[:3])
        images = []
        for product in products:
            # avoid querying .first() twice per product
            first_image = product.product_images.first()
            if first_image:
                images.append(first_image)
        if images:
            return images
        return None
def __str__(self):
return '{} ({})'.format(self.name, self.cat_type)
class Meta:
get_latest_by = 'created_at'
verbose_name_plural = 'Product Categories'
ordering = ['name',]
class Size(models.Model):
category = models.ForeignKey(
ProductCategory,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='sizes',
verbose_name ='Category'
)
size_format = models.CharField(max_length=15, verbose_name='size format e.g UK, US')
value = models.CharField(max_length=10, verbose_name='size value e.g 43 or XL')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date size was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date size details were updated last' )
def __str__(self):
return '{} - {} {}'.format(self.size_format, self.value, self.category.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['value']
class Product(models.Model):
admin = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
related_name='products',
verbose_name ='Staff',
blank=True,
null=True
)
brand = models.ForeignKey(
Brand,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
verbose_name ='Brand'
)
category = models.ForeignKey(
ProductCategory,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
verbose_name ='Category'
)
GENDER = (
('male', 'Male'),
('female', 'Female'),
('unisex', 'Unisex')
)
COLOURS = (
('blue', 'Blue'),
('red', 'Red'),
('white', 'White'),
('black', 'Black'),
('green', 'Green'),
('purple', 'Purple'),
('yellow', 'Yellow'),
('gray', 'Gray'),
('khaki', 'Khaki'),
('brown', 'Brown'),
('orange', 'Orange'),
('navy blue', 'Navy Blue'),
('transparent', 'Transparent'),
('gold', 'Gold'),
('silver', 'Silver'),
)
SIZES = (
('EUR-39', 'EUR 39'),
)
name = models.CharField(max_length=50, verbose_name='name')
desc = models.CharField(max_length=255, verbose_name='description', blank=True, null=True)
gender = models.CharField(max_length=15, choices=GENDER, verbose_name='gender')
size = models.ForeignKey(
Size,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
        verbose_name ='Size'
)
colour = models.CharField(max_length=15, verbose_name='colour', choices=COLOURS)
price_per_unit = models.DecimalField(decimal_places=2, max_digits=17, verbose_name='price (₦)')
quantity = models.PositiveIntegerField(verbose_name='quantity left')
num_deliveries = models.PositiveIntegerField(verbose_name='deliveries', default=0)
orders_count = models.PositiveIntegerField(verbose_name='orders', default=0)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date added'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date product details were updated last' )
def get_absolute_url(self):
return reverse('store:product', kwargs={'slug': self.slug})
def __str__(self):
return '{}'.format(self.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'admin_id', 'name']
class Wish(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='wishes',
verbose_name ='Owner'
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='wishes',
verbose_name ='Product'
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date wish was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date wish details were updated last' )
# Override models save method:
def save(self, *args, **kwargs):
# check if wish product already exists, if it does ignore
if Wish.objects.filter(user_id=self.user_id, product_id=self.product_id).exists():
pass
else:
super(Wish, self).save(*args, **kwargs)
def __str__(self):
return '{} -> {}'.format(self.user.username, self.product.name)
class Meta:
get_latest_by = 'created_at'
verbose_name_plural = 'wishes'
ordering = ['-created_at', 'user_id']
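# A minimal usage sketch of the de-duplicating save above (user and product
# are hypothetical, already-saved instances):
#
#   Wish(user=user, product=product).save()
#   Wish(user=user, product=product).save()  # silently ignored as a duplicate
#   Wish.objects.filter(user=user, product=product).count()  # -> 1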
class Cart(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='cart',
verbose_name ='Owner',
blank=True,
null=True
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='cart',
verbose_name ='product in the cart'
)
quantity = models.PositiveIntegerField(verbose_name='quantity of the product added')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date cart product was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date cart product details were updated last' )
# Override models save method:
def save(self, *args, **kwargs):
# check if cart product already exists, add more quantity to it
if not self.pk:
cart = Cart.objects.filter(user=self.user, product_id=self.product_id)
if cart.exists():
cart_item = cart.first()
cart_item.quantity += int(self.quantity)
super(Cart, cart_item).save(*args, **kwargs)
else:
super(Cart, self).save(*args, **kwargs)
else:
super(Cart, self).save(*args, **kwargs)
def __str__(self):
return 'x{} {} -> {}'.format(self.quantity, self.user, self.product.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'user_id']
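# A minimal sketch of how the quantity merge above behaves (user and product
# are hypothetical, already-saved instances):
#
#   Cart(user=user, product=product, quantity=1).save()
#   Cart(user=user, product=product, quantity=2).save()  # merged, not duplicated
#   Cart.objects.get(user=user, product=product).quantity  # -> 3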
class Order(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name='orders',
verbose_name ='Customer'
)
shipping_address = models.ForeignKey(
ShippingAddress,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='orders',
verbose_name ='shipping address',
)
ORDER_STATUS = (
('pending', 'Pending'),
('processing', 'Processing'),
('delivered', 'Delivered'),
('cancelled', 'Cancelled')
)
ref = models.CharField(verbose_name='reference',max_length=100,null=True,blank=True,
help_text='this field is generated automatically'
)
reason_cancelled = models.CharField(verbose_name='if order is cancelled, why?',max_length=100,blank=True,null=True)
canceller = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='cancellers',
verbose_name ='the canceller',
)
status = models.CharField(choices=ORDER_STATUS, default='pending', max_length=100,
verbose_name='status'
)
deliver_date = models.DateTimeField(null=True, blank=True,
verbose_name='delivered (tetris)'
)
confirm_delivery_date = models.DateTimeField(null=True, blank=True,
verbose_name='confirmed delivered (customer)'
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date ordered'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date order details were updated last' )
def subtotal(self):
from django.db.models import Sum, F
total = self.order_items.aggregate( subtotal=Sum(F('price_per_unit') * F('quantity'), output_field=models.DecimalField()))
if total['subtotal']:
return total['subtotal']
return 0
def get_absolute_url(self):
return reverse('store:order', kwargs={'ref': self.ref})
# Override models save method:
def save(self, *args, **kwargs):
if not self.pk:
# generate reference for the order
# order reference must be unique
self.ref = get_random_string(length=16)
while Order.objects.filter(ref=self.ref).exists():
self.ref = get_random_string(length=16)
super(Order, self).save(*args, **kwargs)
def __str__(self):
return '{} ordered {} [{}]'.format(self.user.username, self.ref, self.status)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'user_id']
# permissions = (
# ('change_status_to_processing', 'Change Status to Processing'),
# ('change_status_to_pending', 'Change Status to Pending'),
# )
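# A minimal sketch of the reference generation above: a 16-character random
# ref is assigned once, on first save, and re-rolled until it is unique
# (order.user is a hypothetical saved customer):
#
#   order = Order(user=user)
#   order.save()
#   len(order.ref)  # -> 16
#   order.save()    # ref is not regenerated on later saves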
class OrderItem(models.Model):
order = models.ForeignKey(
Order,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name='order_items',
verbose_name ='order'
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='order_items',
verbose_name ='product ordered'
)
quantity = models.PositiveIntegerField(verbose_name='quantity ordered')
price_per_unit = models.DecimalField(decimal_places=2, max_digits=17)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date ordered'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date order details were updated last' )
def __str__(self):
return 'x{} {} [{}]'.format(self.quantity, self.product.name, self.order.ref)
class Meta:
verbose_name_plural = 'Order Items'
get_latest_by = 'created_at'
ordering = ['order',]
class ProductImage(models.Model):
product = models.ForeignKey(
Product,
on_delete=models.CASCADE,
related_name='product_images',
verbose_name ='product image belongs to'
)
product_image_url = models.ImageField(upload_to='img/products/', max_length=255, blank=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date image was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date image was updated last' )
def __str__(self):
return '{}\'s image'.format(self.product.name)
class Meta:
verbose_name_plural = 'Product Images'
get_latest_by = 'created_at'
ordering = ['product_id',]
class Feedback(models.Model):
email = models.EmailField( verbose_name='email address')
content = models.TextField(verbose_name='feedback')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='feedback was made'
)
def __str__(self):
return '{}\'s feedback'.format(self.email)
class TetrisImage(models.Model):
TYPES = (
        ('background-image', 'Background Image (1366px by 738px)'),
('default-product-img', 'Default Product Image (should be a perfect square)'),
('default-brand-img', 'Default Brand Image (should be a perfect square)'),
('default-category-img', 'Default Category Image (should be a perfect square)'),
)
name = models.CharField(max_length=30, verbose_name='name of the image', choices=TYPES)
description = models.TextField(verbose_name='description of the image', blank=True, null=True )
image_url = models.ImageField(upload_to='img/tetris-img/', max_length=255, blank=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date image was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date image was updated last' )
def __str__(self):
return self.name
| 36.220273
| 130
| 0.645875
|
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager
from django.contrib.auth import get_user_model
from django.utils.crypto import get_random_string
from .addresses import STATES
from autoslug import AutoSlugField
from django.urls import reverse
from phonenumber_field.modelfields import PhoneNumberField
def get_profile_pic_path(instance, filename):
return 'profile_pictures/{}-{}'.format(instance.user.username, filename)
class User(AbstractUser):
email = models.EmailField( verbose_name='email address', unique=True)
email_token = models.CharField(verbose_name='email token', max_length=16, editable=False, null=True)
is_verified = models.BooleanField(default=False)
phone = PhoneNumberField(default='',
help_text='Please use the following format: <em>+234 XXX XXX XXXX</em>.',
)
profile_pic_path = models.ImageField(upload_to=get_profile_pic_path, max_length=255)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
def save(self, *args, **kwargs):
self.email_token = '{}{}'.format(self.email[:2], get_random_string(length=14))
while User.objects.filter(email_token=self.email_token).exists():
self.email_token = '{}{}'.format(self.email[:2], get_random_string(length=14))
super(User, self).save(*args, **kwargs)
def __str__(self):
return '{}'.format(self.email)
class Brand(models.Model):
name = models.CharField(max_length=40, unique=True, verbose_name='name of the brand' )
email = models.EmailField( max_length=50, verbose_name='email address of the brand')
phone = PhoneNumberField(blank=True, null=True,
help_text='Please use the following format: <em>+234 XXX XXX XXXX</em>.',
)
desc = models.CharField(max_length=255, verbose_name='description of brand', blank=True, null=True)
brand_image_url = models.ImageField(upload_to='img/brands/', max_length=255, blank=True, null=True)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date brand was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date brand details were updated last' )
def get_absolute_url(self):
return reverse('store:brand', kwargs={'slug': self.slug})
def get_carts(self):
return Cart.objects.filter(product__brand=self).order_by('-created_at')
def get_wishes(self):
return Wish.objects.filter(product__brand=self).order_by('-created_at')
def get_orders(self):
return OrderItem.objects.filter(product__brand=self).order_by('-created_at')
def random_product_images(self):
products = list(self.products.all()[:3])
images = []
if len(products) > 0:
for product in products:
if product.product_images.first():
images.append(product.product_images.first())
if len(images) > 0:
return images
return None
def __str__(self):
return '{}'.format(self.name)
class Meta:
get_latest_by = 'created_at'
class ShippingAddress(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='shipping_addresses',
verbose_name ='Customer'
)
is_default = models.BooleanField(default=False)
zip_code = models.CharField( max_length=10, verbose_name='zip code' )
address = models.CharField( max_length=60, verbose_name='address' )
city = models.CharField( max_length=30, verbose_name='city' )
state = models.CharField( max_length=15, verbose_name='state', choices=STATES )
country = models.CharField( max_length=30, default='Nigeria', verbose_name='country' )
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date added'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date shipping address details were updated last' )
def __str__(self):
return '{}, {}. ({})'.format(self.city, self.state, self.user.username)
class Meta:
verbose_name_plural = 'Shipping Addresses'
get_latest_by = 'created_at'
ordering = ['user_id',]
class ProductCategory(models.Model):
name = models.CharField(max_length=30, unique=True, verbose_name='name of category')
CAT_TYPES = (
('top', 'Top'),
('bottom', 'Bottom'),
('accessory', 'Accessory'),
('foot', 'Footwear'),
('other', 'Other')
)
cat_type = models.CharField(max_length=10, choices=CAT_TYPES, verbose_name='type of category')
desc = models.CharField(max_length=255, verbose_name='description of product category', blank=True, null=True)
cat_image_url = models.ImageField(upload_to='img/product_categories/', max_length=255, blank=True, null=True)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date product category was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date product category details were updated last')
def get_absolute_url(self):
return reverse('store:category', kwargs={'slug': self.slug})
def random_product_images(self):
products = list(self.products.all()[:3])
images = []
if len(products) > 0:
for product in products:
if product.product_images.first():
images.append(product.product_images.first())
if len(images) > 0:
return images
return None
def __str__(self):
return '{} ({})'.format(self.name, self.cat_type)
class Meta:
get_latest_by = 'created_at'
verbose_name_plural = 'Product Categories'
ordering = ['name',]
class Size(models.Model):
category = models.ForeignKey(
ProductCategory,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='sizes',
verbose_name ='Category'
)
    size_format = models.CharField(max_length=15, verbose_name='size format e.g. UK, US')
    value = models.CharField(max_length=10, verbose_name='size value e.g. 43 or XL')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date size was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date size details were updated last' )
def __str__(self):
return '{} - {} {}'.format(self.size_format, self.value, self.category.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['value']
class Product(models.Model):
admin = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
related_name='products',
verbose_name ='Staff',
blank=True,
null=True
)
brand = models.ForeignKey(
Brand,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
verbose_name ='Brand'
)
category = models.ForeignKey(
ProductCategory,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
verbose_name ='Category'
)
GENDER = (
('male', 'Male'),
('female', 'Female'),
('unisex', 'Unisex')
)
COLOURS = (
('blue', 'Blue'),
('red', 'Red'),
('white', 'White'),
('black', 'Black'),
('green', 'Green'),
('purple', 'Purple'),
('yellow', 'Yellow'),
('gray', 'Gray'),
('khaki', 'Khaki'),
('brown', 'Brown'),
('orange', 'Orange'),
('navy blue', 'Navy Blue'),
('transparent', 'Transparent'),
('gold', 'Gold'),
('silver', 'Silver'),
)
SIZES = (
('EUR-39', 'EUR 39'),
)
name = models.CharField(max_length=50, verbose_name='name')
desc = models.CharField(max_length=255, verbose_name='description', blank=True, null=True)
gender = models.CharField(max_length=15, choices=GENDER, verbose_name='gender')
size = models.ForeignKey(
Size,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='products',
        verbose_name ='Size'
)
colour = models.CharField(max_length=15, verbose_name='colour', choices=COLOURS)
price_per_unit = models.DecimalField(decimal_places=2, max_digits=17, verbose_name='price (₦)')
quantity = models.PositiveIntegerField(verbose_name='quantity left')
num_deliveries = models.PositiveIntegerField(verbose_name='deliveries', default=0)
orders_count = models.PositiveIntegerField(verbose_name='orders', default=0)
slug = AutoSlugField(populate_from='name',
unique=True,
sep='',
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date added'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date product details were updated last' )
def get_absolute_url(self):
return reverse('store:product', kwargs={'slug': self.slug})
def __str__(self):
return '{}'.format(self.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'admin_id', 'name']
class Wish(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='wishes',
verbose_name ='Owner'
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='wishes',
verbose_name ='Product'
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date wish was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date wish details were updated last' )
def save(self, *args, **kwargs):
if Wish.objects.filter(user_id=self.user_id, product_id=self.product_id).exists():
pass
else:
super(Wish, self).save(*args, **kwargs)
def __str__(self):
return '{} -> {}'.format(self.user.username, self.product.name)
class Meta:
get_latest_by = 'created_at'
verbose_name_plural = 'wishes'
ordering = ['-created_at', 'user_id']
class Cart(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='cart',
verbose_name ='Owner',
blank=True,
null=True
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='cart',
verbose_name ='product in the cart'
)
quantity = models.PositiveIntegerField(verbose_name='quantity of the product added')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date cart product was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date cart product details were updated last' )
def save(self, *args, **kwargs):
if not self.pk:
cart = Cart.objects.filter(user=self.user, product_id=self.product_id)
if cart.exists():
cart_item = cart.first()
cart_item.quantity += int(self.quantity)
super(Cart, cart_item).save(*args, **kwargs)
else:
super(Cart, self).save(*args, **kwargs)
else:
super(Cart, self).save(*args, **kwargs)
def __str__(self):
return 'x{} {} -> {}'.format(self.quantity, self.user, self.product.name)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'user_id']
class Order(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name='orders',
verbose_name ='Customer'
)
shipping_address = models.ForeignKey(
ShippingAddress,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='orders',
verbose_name ='shipping address',
)
ORDER_STATUS = (
('pending', 'Pending'),
('processing', 'Processing'),
('delivered', 'Delivered'),
('cancelled', 'Cancelled')
)
ref = models.CharField(verbose_name='reference',max_length=100,null=True,blank=True,
help_text='this field is generated automatically'
)
reason_cancelled = models.CharField(verbose_name='if order is cancelled, why?',max_length=100,blank=True,null=True)
canceller = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='cancellers',
verbose_name ='the canceller',
)
status = models.CharField(choices=ORDER_STATUS, default='pending', max_length=100,
verbose_name='status'
)
deliver_date = models.DateTimeField(null=True, blank=True,
verbose_name='delivered (tetris)'
)
confirm_delivery_date = models.DateTimeField(null=True, blank=True,
verbose_name='confirmed delivered (customer)'
)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date ordered'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date order details were updated last' )
def subtotal(self):
from django.db.models import Sum, F
total = self.order_items.aggregate( subtotal=Sum(F('price_per_unit') * F('quantity'), output_field=models.DecimalField()))
if total['subtotal']:
return total['subtotal']
return 0
def get_absolute_url(self):
return reverse('store:order', kwargs={'ref': self.ref})
def save(self, *args, **kwargs):
if not self.pk:
self.ref = get_random_string(length=16)
while Order.objects.filter(ref=self.ref).exists():
self.ref = get_random_string(length=16)
super(Order, self).save(*args, **kwargs)
def __str__(self):
return '{} ordered {} [{}]'.format(self.user.username, self.ref, self.status)
class Meta:
get_latest_by = 'created_at'
ordering = ['-created_at', 'user_id']
class OrderItem(models.Model):
order = models.ForeignKey(
Order,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name='order_items',
verbose_name ='order'
)
product = models.ForeignKey(
Product,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='order_items',
verbose_name ='product ordered'
)
quantity = models.PositiveIntegerField(verbose_name='quantity ordered')
price_per_unit = models.DecimalField(decimal_places=2, max_digits=17)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date ordered'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date order details were updated last' )
def __str__(self):
return 'x{} {} [{}]'.format(self.quantity, self.product.name, self.order.ref)
class Meta:
verbose_name_plural = 'Order Items'
get_latest_by = 'created_at'
ordering = ['order',]
class ProductImage(models.Model):
product = models.ForeignKey(
Product,
on_delete=models.CASCADE,
related_name='product_images',
verbose_name ='product image belongs to'
)
product_image_url = models.ImageField(upload_to='img/products/', max_length=255, blank=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date image was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date image was updated last' )
def __str__(self):
return '{}\'s image'.format(self.product.name)
class Meta:
verbose_name_plural = 'Product Images'
get_latest_by = 'created_at'
ordering = ['product_id',]
class Feedback(models.Model):
email = models.EmailField( verbose_name='email address')
content = models.TextField(verbose_name='feedback')
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='feedback was made'
)
def __str__(self):
return '{}\'s feedback'.format(self.email)
class TetrisImage(models.Model):
TYPES = (
        ('background-image', 'Background Image (1366px by 738px)'),
('default-product-img', 'Default Product Image (should be a perfect square)'),
('default-brand-img', 'Default Brand Image (should be a perfect square)'),
('default-category-img', 'Default Category Image (should be a perfect square)'),
)
name = models.CharField(max_length=30, verbose_name='name of the image', choices=TYPES)
description = models.TextField(verbose_name='description of the image', blank=True, null=True )
image_url = models.ImageField(upload_to='img/tetris-img/', max_length=255, blank=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False,
verbose_name='date image was added to db'
)
updated_at = models.DateTimeField( auto_now=True, verbose_name='date image was updated last' )
def __str__(self):
return self.name
| true
| true
|
1c49bcf86193b8dd320433d8f581a02d71c51fc7
| 927
|
py
|
Python
|
PyPoll/main.py
|
amitpatel02-atl/Python-Challenge
|
e864b87b40c64fa7f5b6a3ad98da2f31c8028790
|
[
"ADSL"
] | null | null | null |
PyPoll/main.py
|
amitpatel02-atl/Python-Challenge
|
e864b87b40c64fa7f5b6a3ad98da2f31c8028790
|
[
"ADSL"
] | null | null | null |
PyPoll/main.py
|
amitpatel02-atl/Python-Challenge
|
e864b87b40c64fa7f5b6a3ad98da2f31c8028790
|
[
"ADSL"
] | null | null | null |
# First we'll import the os module
# This will allow us to create file paths across operating systems
import os
# Module for reading CSV files
import csv
# Set path for file
csvpath = os.path.join("election_data.csv")
#Open the csv
with open(csvpath) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    #Read the header row first (skip this step if there is no header)
    csv_header = next(csvreader)
    print(f"CSV Header: {csv_header}")
    # Tally the total votes and the votes per candidate; the candidate name
    # is assumed to be in the third column of election_data.csv
    vote_counter = 0
    candidate_votes = {}
    for row in csvreader:
        # Count number of votes
        vote_counter += 1
        candidate_votes[row[2]] = candidate_votes.get(row[2], 0) + 1
print("Election Results")
print("------------------")
print(f"Total Votes : {vote_counter}")
for candidate, votes in candidate_votes.items():
    print(f"{candidate}: {votes / vote_counter:.3%} ({votes})")
winner = max(candidate_votes, key=candidate_votes.get)
print(f"Winner: {winner}")
| 24.394737
| 70
| 0.674218
|
# This will allow us to create file paths across operating systems
import os
# Module for reading CSV files
import csv
# Set path for file
csvpath = os.path.join("election_data.csv")
#Open the csv
with open(csvpath) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    #Read the header row first (skip this step if there is no header)
    csv_header = next(csvreader)
    print(f"CSV Header: {csv_header}")
    vote_counter = 0
    candidate_votes = {}
    for row in csvreader:
        vote_counter += 1
        candidate_votes[row[2]] = candidate_votes.get(row[2], 0) + 1
print("Election Results")
print("------------------")
print(f"Total Votes : {vote_counter}")
for candidate, votes in candidate_votes.items():
    print(f"{candidate}: {votes / vote_counter:.3%} ({votes})")
winner = max(candidate_votes, key=candidate_votes.get)
print(f"Winner: {winner}")
| true
| true
|
1c49bd27ad29632093673b01d9d464f573e77386
| 5,118
|
py
|
Python
|
discord/ui/modal.py
|
DeadPool3333/enhanced-discord.py
|
be34c7e521c9edbe6d8ff949962c0ab777821712
|
[
"MIT"
] | 4
|
2021-09-28T12:45:00.000Z
|
2022-02-04T20:11:58.000Z
|
discord/ui/modal.py
|
DeadPool3333/enhanced-discord.py
|
be34c7e521c9edbe6d8ff949962c0ab777821712
|
[
"MIT"
] | 2
|
2021-11-07T12:31:33.000Z
|
2022-01-06T17:06:51.000Z
|
discord/ui/modal.py
|
DeadPool3333/enhanced-discord.py
|
be34c7e521c9edbe6d8ff949962c0ab777821712
|
[
"MIT"
] | 8
|
2022-01-10T22:26:03.000Z
|
2022-02-25T14:26:04.000Z
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import os
import asyncio
import sys
import traceback
from .item import Item
from itertools import groupby
from .view import _ViewWeights as _ModalWeights
from ..interactions import Interaction
if TYPE_CHECKING:
from ..state import ConnectionState
__all__ = ("Modal",)
class Modal:
"""Represents a UI Modal.
This object must be inherited to create a UI within Discord.
.. versionadded:: 2.0
Parameters
------------
title: :class:`str`
The title of the modal.
custom_id: Optional[:class:`str`]
The ID of the modal that gets received during an interaction.
Attributes
------------
title: :class:`str`
The title of the modal.
custom_id: Optional[:class:`str`]
The ID of the modal that gets received during an interaction.
children: List[:class:`Item`]
The children of the modal.
"""
def __init__(self, title: str, custom_id: Optional[str] = None) -> None:
self.title = title
self.custom_id = custom_id or os.urandom(16).hex()
self.children: List[Item] = []
self.__weights = _ModalWeights(self.children)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.title=} {self.custom_id=}>"
def add_item(self, item: Item):
if not isinstance(item, Item):
raise TypeError(f"expected Item not {item.__class__!r}")
        if len(self.children) >= 5:
raise ValueError("Modal can only have a maximum of 5 items")
self.__weights.add_item(item)
self.children.append(item)
def remove_item(self, item: Item):
try:
self.children.remove(item)
except ValueError:
pass
else:
self.__weights.remove_item(item)
def to_components(self) -> List[Dict[str, Any]]:
def key(item: Item) -> int:
return item._rendered_row or 0
children = sorted(self.children, key=key)
components: List[Dict[str, Any]] = []
for _, group in groupby(children, key=key):
children = [item.to_component_dict() for item in group]
if not children:
continue
components.append(
{
"type": 1,
"components": children,
}
)
return components
async def callback(self, interaction: Interaction):
"""|coro|
The callback associated with this Modal.
        This can be overridden by subclasses.
Parameters
-----------
interaction: :class:`.Interaction`
The interaction that submitted this Modal.
"""
pass
async def on_error(self, error: Exception, interaction: Interaction):
"""|coro|
The callback for when an error occurs in the :meth:`callback`.
The default implementation prints the traceback to stderr.
Parameters
-----------
error: :class:`Exception`
The error that occurred.
interaction: :class:`.Interaction`
The interaction that submitted this Modal.
"""
print(f"Ignoring exception in modal {self}:", file=sys.stderr)
traceback.print_exception(error.__class__, error, error.__traceback__, file=sys.stderr)
def to_dict(self) -> Dict[str, Any]:
return {
"title": self.title,
"custom_id": self.custom_id,
"components": self.to_components(),
}
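# A minimal sketch of how this class is meant to be subclassed (TextInput is
# a hypothetical Item subclass; the concrete item type depends on the build
# of the library):
#
#   class Feedback(Modal):
#       def __init__(self):
#           super().__init__(title="Feedback")
#           self.add_item(TextInput(label="Your thoughts"))
#
#       async def callback(self, interaction: Interaction):
#           await interaction.response.send_message("Thanks!")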
class ModalStore:
def __init__(self, state: ConnectionState) -> None:
# (user_id, custom_id) : Modal
self._modals: Dict[Tuple[int, str], Modal] = {}
self._state = state
def add_modal(self, modal: Modal, user_id: int):
self._modals[(user_id, modal.custom_id)] = modal
def remove_modal(self, modal: Modal, user_id: int):
self._modals.pop((user_id, modal.custom_id))
async def _scheduled_task(self, modal: Modal, interaction: Interaction):
try:
await modal.callback(interaction)
except Exception as e:
await modal.on_error(e, interaction)
def dispatch(self, user_id: int, custom_id: str, interaction: Interaction):
key = (user_id, custom_id)
modal = self._modals.get(key)
if modal is None:
return
assert interaction.data is not None
components = [
component for action_row in interaction.data["components"] for component in action_row["components"]
]
for component in components:
component_custom_id = component["custom_id"]
for child in modal.children:
if child.custom_id == component_custom_id: # type: ignore
child.refresh_state(component)
break
asyncio.create_task(
self._scheduled_task(modal, interaction), name=f"discord-ui-modal-dispatch-{modal.custom_id}"
)
self.remove_modal(modal, user_id)
| 29.413793
| 112
| 0.603751
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import os
import asyncio
import sys
import traceback
from .item import Item
from itertools import groupby
from .view import _ViewWeights as _ModalWeights
from ..interactions import Interaction
if TYPE_CHECKING:
from ..state import ConnectionState
__all__ = ("Modal",)
class Modal:
def __init__(self, title: str, custom_id: Optional[str] = None) -> None:
self.title = title
self.custom_id = custom_id or os.urandom(16).hex()
self.children: List[Item] = []
self.__weights = _ModalWeights(self.children)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.title=} {self.custom_id=}>"
def add_item(self, item: Item):
if not isinstance(item, Item):
raise TypeError(f"expected Item not {item.__class__!r}")
        if len(self.children) >= 5:
raise ValueError("Modal can only have a maximum of 5 items")
self.__weights.add_item(item)
self.children.append(item)
def remove_item(self, item: Item):
try:
self.children.remove(item)
except ValueError:
pass
else:
self.__weights.remove_item(item)
def to_components(self) -> List[Dict[str, Any]]:
def key(item: Item) -> int:
return item._rendered_row or 0
children = sorted(self.children, key=key)
components: List[Dict[str, Any]] = []
for _, group in groupby(children, key=key):
children = [item.to_component_dict() for item in group]
if not children:
continue
components.append(
{
"type": 1,
"components": children,
}
)
return components
async def callback(self, interaction: Interaction):
pass
async def on_error(self, error: Exception, interaction: Interaction):
print(f"Ignoring exception in modal {self}:", file=sys.stderr)
traceback.print_exception(error.__class__, error, error.__traceback__, file=sys.stderr)
def to_dict(self) -> Dict[str, Any]:
return {
"title": self.title,
"custom_id": self.custom_id,
"components": self.to_components(),
}
class ModalStore:
def __init__(self, state: ConnectionState) -> None:
self._modals: Dict[Tuple[int, str], Modal] = {}
self._state = state
def add_modal(self, modal: Modal, user_id: int):
self._modals[(user_id, modal.custom_id)] = modal
def remove_modal(self, modal: Modal, user_id: int):
self._modals.pop((user_id, modal.custom_id))
async def _scheduled_task(self, modal: Modal, interaction: Interaction):
try:
await modal.callback(interaction)
except Exception as e:
await modal.on_error(e, interaction)
def dispatch(self, user_id: int, custom_id: str, interaction: Interaction):
key = (user_id, custom_id)
modal = self._modals.get(key)
if modal is None:
return
assert interaction.data is not None
components = [
component for action_row in interaction.data["components"] for component in action_row["components"]
]
for component in components:
component_custom_id = component["custom_id"]
for child in modal.children:
if child.custom_id == component_custom_id:
child.refresh_state(component)
break
asyncio.create_task(
self._scheduled_task(modal, interaction), name=f"discord-ui-modal-dispatch-{modal.custom_id}"
)
self.remove_modal(modal, user_id)
| true
| true
|
1c49bde9cb2690f493422ebf0eb42afb5f96147d
| 654
|
py
|
Python
|
src/directional_clustering/clustering/kmeans/__init__.py
|
arpastrana/directional_clustering
|
78fd39fe4ad207b2a639deddf4ba12d5580df5c6
|
[
"MIT"
] | 6
|
2020-08-04T15:24:22.000Z
|
2022-02-02T21:34:33.000Z
|
src/directional_clustering/clustering/kmeans/__init__.py
|
arpastrana/directional_clustering
|
78fd39fe4ad207b2a639deddf4ba12d5580df5c6
|
[
"MIT"
] | 3
|
2020-10-30T02:33:08.000Z
|
2020-11-04T19:45:08.000Z
|
src/directional_clustering/clustering/kmeans/__init__.py
|
arpastrana/directional_clustering
|
78fd39fe4ad207b2a639deddf4ba12d5580df5c6
|
[
"MIT"
] | null | null | null |
"""
directional_clustering.clustering.kmeans
****************************
.. currentmodule:: directional_clustering.clustering.kmeans
Classes
=======
.. autosummary::
:toctree: generated/
:nosignatures:
Functions
=========
.. autosummary::
:toctree: generated/
:nosignatures:
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from .<module> import *
from .operations import *
from .distances import *
from ._kmeans import *
from .cosine import *
from .variational import *
from .euclidean import *
__all__ = [name for name in dir() if not name.startswith('_')]
| 16.769231
| 62
| 0.689602
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .operations import *
from .distances import *
from ._kmeans import *
from .cosine import *
from .variational import *
from .euclidean import *
__all__ = [name for name in dir() if not name.startswith('_')]
| true
| true
|
1c49be5ca86913a07ac3f5f1c7e0e1e3d9ac11be
| 408
|
py
|
Python
|
integration_tests/mass_simulator_ITG_test.py
|
hoondental/smtm
|
f7648da652c5437ee27efef6fbf2480045130c16
|
[
"MIT"
] | 16
|
2020-02-21T08:18:04.000Z
|
2022-03-29T06:34:29.000Z
|
integration_tests/mass_simulator_ITG_test.py
|
hoondental/smtm
|
f7648da652c5437ee27efef6fbf2480045130c16
|
[
"MIT"
] | 31
|
2019-11-11T13:06:47.000Z
|
2022-02-26T12:14:41.000Z
|
integration_tests/mass_simulator_ITG_test.py
|
msaltnet/smtm
|
b2b480a59204e7d730d60ec037b00660d9dd235d
|
[
"MIT"
] | 12
|
2020-07-03T06:44:22.000Z
|
2022-03-30T03:03:05.000Z
|
import time
import unittest
from smtm import MassSimulator
from unittest.mock import *
class MassSimulatorIntegrationTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch("builtins.print")
def test_ITG_run_single_simulation(self, mock_print):
mass = MassSimulator()
mass.run("integration_tests/data/mass_simulation_config.json")
| 21.473684
| 70
| 0.720588
|
import time
import unittest
from smtm import MassSimulator
from unittest.mock import *
class MassSimulatorIntegrationTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch("builtins.print")
def test_ITG_run_single_simulation(self, mock_print):
mass = MassSimulator()
mass.run("integration_tests/data/mass_simulation_config.json")
| true
| true
|
1c49bf29b541c47c8d66c29b20e2bf30bc1b27d6
| 765
|
py
|
Python
|
tests/journal.ext/info_example.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/journal.ext/info_example.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/journal.ext/info_example.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2022 all rights reserved
def test():
"""
Exercise the info channel with a realistic example
"""
# get the trash can
from journal.ext.journal import Trash as trash
# and the channel
from journal.ext.journal import Informational as info
# make an info channel
channel = info(name="tests.journal.info")
# send the output to trash
channel.device = trash()
# add some metadata
channel.notes["time"] = "now"
# inject
channel.line("info channel:")
channel.log(" hello world!")
# all done
return
# main
if __name__ == "__main__":
# run the test
test()
# end of file
| 19.615385
| 57
| 0.627451
|
def test():
from journal.ext.journal import Trash as trash
from journal.ext.journal import Informational as info
channel = info(name="tests.journal.info")
channel.device = trash()
channel.notes["time"] = "now"
channel.line("info channel:")
channel.log(" hello world!")
return
if __name__ == "__main__":
test()
| true
| true
|
1c49bfa2d1946ee3be217dd485422832d454b222
| 292
|
py
|
Python
|
xicam/spectral/operations/clustering.py
|
Xi-CAM/Xi-cam.spectral
|
62240c4992ba79a2f97db99ade988a4613566e98
|
[
"BSD-3-Clause"
] | null | null | null |
xicam/spectral/operations/clustering.py
|
Xi-CAM/Xi-cam.spectral
|
62240c4992ba79a2f97db99ade988a4613566e98
|
[
"BSD-3-Clause"
] | 10
|
2020-09-15T03:16:26.000Z
|
2021-02-06T08:17:47.000Z
|
xicam/spectral/operations/clustering.py
|
Xi-CAM/Xi-cam.spectral
|
62240c4992ba79a2f97db99ade988a4613566e98
|
[
"BSD-3-Clause"
] | 1
|
2020-10-20T17:06:43.000Z
|
2020-10-20T17:06:43.000Z
|
# k-means
## scikit-learn.cluster.kmeans
# hierarchical clustering analysis "EMSC" (Extended Multiplicative Scattering Correction)
## https://scikit-learn.org/stable/modules/clustering.html#hierarchical-clustering
## https://github.com/RPCausin/EMSC/blob/master/EMSC.py: (Bassan, Konevskikh)
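# The notes above name scikit-learn's k-means as the intended entry point; a
# minimal sketch of clustering per-pixel spectra that way follows (the array
# shape, cluster count, and variable names are assumptions for illustration):
import numpy as np
from sklearn.cluster import KMeans
# spectra: one preprocessed spectrum per pixel, shape (n_pixels, n_wavenumbers)
spectra = np.random.rand(100, 512)
labels = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(spectra)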
| 48.666667
| 89
| 0.794521
| true
| true
|
|
1c49c0b805416ccceb49291147df00302441da20
| 10,379
|
py
|
Python
|
test/mitmproxy/test_optmanager.py
|
dotnes/mitmproxy
|
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
|
[
"MIT"
] | 1
|
2017-12-27T09:05:23.000Z
|
2017-12-27T09:05:23.000Z
|
test/mitmproxy/test_optmanager.py
|
dotnes/mitmproxy
|
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
|
[
"MIT"
] | null | null | null |
test/mitmproxy/test_optmanager.py
|
dotnes/mitmproxy
|
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
|
[
"MIT"
] | 2
|
2018-09-03T19:26:31.000Z
|
2019-04-08T23:05:15.000Z
|
import copy
import pytest
import typing
import argparse
from mitmproxy import options
from mitmproxy import optmanager
from mitmproxy import exceptions
class TO(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", typing.Optional[int], None, "help")
self.add_option("two", typing.Optional[int], 2, "help")
self.add_option("bool", bool, False, "help")
self.add_option("required_int", int, 2, "help")
class TD(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", str, "done", "help")
self.add_option("two", str, "dtwo", "help")
class TD2(TD):
def __init__(self):
super().__init__()
self.add_option("three", str, "dthree", "help")
self.add_option("four", str, "dfour", "help")
class TM(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("two", typing.Sequence[str], ["foo"], "help")
self.add_option("one", typing.Optional[str], None, "help")
def test_defaults():
o = TD2()
defaults = {
"one": "done",
"two": "dtwo",
"three": "dthree",
"four": "dfour",
}
for k, v in defaults.items():
assert o.default(k) == v
assert not o.has_changed("one")
newvals = dict(
one="xone",
two="xtwo",
three="xthree",
four="xfour",
)
o.update(**newvals)
assert o.has_changed("one")
for k, v in newvals.items():
assert v == getattr(o, k)
o.reset()
assert not o.has_changed("one")
for k in o.keys():
assert not o.has_changed(k)
def test_required_int():
o = TO()
with pytest.raises(exceptions.OptionsError):
o.parse_setval("required_int", None)
def test_deepcopy():
o = TD()
copy.deepcopy(o)
def test_options():
o = TO()
assert o.keys() == {"bool", "one", "two", "required_int"}
assert o.one is None
assert o.two == 2
o.one = 1
assert o.one == 1
with pytest.raises(TypeError):
TO(nonexistent = "value")
with pytest.raises(Exception, match="Unknown options"):
o.nonexistent = "value"
with pytest.raises(Exception, match="Unknown options"):
o.update(nonexistent = "value")
assert o.update_known(nonexistent = "value") == {"nonexistent": "value"}
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
o.changed.connect(sub)
o.one = 90
assert len(rec) == 1
assert rec[-1].one == 90
o.update(one=3)
assert len(rec) == 2
assert rec[-1].one == 3
def test_setter():
o = TO()
f = o.setter("two")
f(99)
assert o.two == 99
with pytest.raises(Exception, match="No such option"):
o.setter("nonexistent")
def test_toggler():
o = TO()
f = o.toggler("bool")
assert o.bool is False
f()
assert o.bool is True
f()
assert o.bool is False
with pytest.raises(Exception, match="No such option"):
o.toggler("nonexistent")
with pytest.raises(Exception, match="boolean options"):
o.toggler("one")
class Rec():
def __init__(self):
self.called = None
def __call__(self, *args, **kwargs):
self.called = (args, kwargs)
def test_subscribe():
o = TO()
r = Rec()
# pytest.raises keeps a reference here that interferes with the cleanup test
# further down.
try:
o.subscribe(r, ["unknown"])
except exceptions.OptionsError:
pass
else:
raise AssertionError
assert len(o.changed.receivers) == 0
o.subscribe(r, ["two"])
o.one = 2
assert not r.called
o.two = 3
assert r.called
assert len(o.changed.receivers) == 1
del r
o.two = 4
assert len(o.changed.receivers) == 0
class binder:
def __init__(self):
self.o = TO()
self.called = False
self.o.subscribe(self.bound, ["two"])
def bound(self, *args, **kwargs):
self.called = True
t = binder()
t.o.one = 3
assert not t.called
t.o.two = 3
assert t.called
def test_rollback():
o = TO()
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
recerr = []
def errsub(opts, **kwargs):
recerr.append(kwargs)
def err(opts, updated):
if opts.one == 10:
raise exceptions.OptionsError()
if opts.bool is True:
raise exceptions.OptionsError()
o.changed.connect(sub)
o.changed.connect(err)
o.errored.connect(errsub)
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.one = 10
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.bool = True
assert o.bool is False
assert isinstance(recerr[0]["exc"], exceptions.OptionsError)
assert o.one is None
assert o.bool is False
assert len(rec) == 4
assert rec[0].one == 10
assert rec[1].one is None
assert rec[2].bool is True
assert rec[3].bool is False
with pytest.raises(exceptions.OptionsError):
with o.rollback({"one"}, reraise=True):
raise exceptions.OptionsError()
def test_simple():
assert repr(TO())
assert "one" in TO()
def test_items():
assert TO().items()
def test_serialize():
o = TD2()
o.three = "set"
assert "dfour" in optmanager.serialize(o, None, defaults=True)
data = optmanager.serialize(o, None)
assert "dfour" not in data
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
assert not o == 42
t = """
unknown: foo
"""
data = optmanager.serialize(o, t)
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
t = "invalid: foo\ninvalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "invalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "# a comment"
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
t = ""
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
def test_serialize_defaults():
o = options.Options()
assert optmanager.serialize(o, None, defaults=True)
def test_saving(tmpdir):
o = TD2()
o.three = "set"
dst = str(tmpdir.join("conf"))
optmanager.save(o, dst, defaults=True)
o2 = TD2()
optmanager.load_paths(o2, dst)
o2.three = "foo"
optmanager.save(o2, dst, defaults=True)
optmanager.load_paths(o, dst)
assert o.three == "foo"
with open(dst, 'a') as f:
f.write("foobar: '123'")
assert optmanager.load_paths(o, dst) == {"foobar": "123"}
with open(dst, 'a') as f:
f.write("'''")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with open(dst, 'wb') as f:
f.write(b"\x01\x02\x03")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
with open(dst, 'wb') as f:
f.write(b"\xff\xff\xff")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
def test_merge():
m = TM()
m.merge(dict(one="two"))
assert m.one == "two"
m.merge(dict(one=None))
assert m.one == "two"
m.merge(dict(two=["bar"]))
assert m.two == ["foo", "bar"]
def test_option():
o = optmanager._Option("test", int, 1, "help", None)
assert o.current() == 1
with pytest.raises(TypeError):
o.set("foo")
with pytest.raises(TypeError):
optmanager._Option("test", str, 1, "help", None)
o2 = optmanager._Option("test", int, 1, "help", None)
assert o2 == o
o2.set(5)
assert o2 != o
def test_dump_defaults():
o = options.Options()
assert optmanager.dump_defaults(o)
def test_dump_dicts():
o = options.Options()
assert optmanager.dump_dicts(o)
assert optmanager.dump_dicts(o, ['http2', 'listen_port'])
class TTypes(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("str", str, "str", "help")
self.add_option("optstr", typing.Optional[str], "optstr", "help", "help")
self.add_option("bool", bool, False, "help")
self.add_option("bool_on", bool, True, "help")
self.add_option("int", int, 0, "help")
self.add_option("optint", typing.Optional[int], 0, "help")
self.add_option("seqstr", typing.Sequence[str], [], "help")
self.add_option("unknown", float, 0.0, "help")
def test_make_parser():
parser = argparse.ArgumentParser()
opts = TTypes()
opts.make_parser(parser, "str", short="a")
opts.make_parser(parser, "bool", short="b")
opts.make_parser(parser, "int", short="c")
opts.make_parser(parser, "seqstr", short="d")
opts.make_parser(parser, "bool_on", short="e")
with pytest.raises(ValueError):
opts.make_parser(parser, "unknown")
# Nonexistent options ignore
opts.make_parser(parser, "nonexistentxxx")
def test_set():
opts = TTypes()
opts.set("str=foo")
assert opts.str == "foo"
with pytest.raises(TypeError):
opts.set("str")
opts.set("optstr=foo")
assert opts.optstr == "foo"
opts.set("optstr")
assert opts.optstr is None
opts.set("bool=false")
assert opts.bool is False
opts.set("bool")
assert opts.bool is True
opts.set("bool=true")
assert opts.bool is True
with pytest.raises(exceptions.OptionsError):
opts.set("bool=wobble")
opts.set("bool=toggle")
assert opts.bool is False
opts.set("bool=toggle")
assert opts.bool is True
opts.set("int=1")
assert opts.int == 1
with pytest.raises(exceptions.OptionsError):
opts.set("int=wobble")
opts.set("optint")
assert opts.optint is None
assert opts.seqstr == []
opts.set("seqstr=foo")
assert opts.seqstr == ["foo"]
opts.set("seqstr=bar")
assert opts.seqstr == ["foo", "bar"]
opts.set("seqstr")
assert opts.seqstr == []
with pytest.raises(exceptions.OptionsError):
opts.set("nonexistent=wobble")
| 24.137209
| 81
| 0.598902
|
import copy
import pytest
import typing
import argparse
from mitmproxy import options
from mitmproxy import optmanager
from mitmproxy import exceptions
class TO(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", typing.Optional[int], None, "help")
self.add_option("two", typing.Optional[int], 2, "help")
self.add_option("bool", bool, False, "help")
self.add_option("required_int", int, 2, "help")
class TD(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", str, "done", "help")
self.add_option("two", str, "dtwo", "help")
class TD2(TD):
def __init__(self):
super().__init__()
self.add_option("three", str, "dthree", "help")
self.add_option("four", str, "dfour", "help")
class TM(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("two", typing.Sequence[str], ["foo"], "help")
self.add_option("one", typing.Optional[str], None, "help")
def test_defaults():
o = TD2()
defaults = {
"one": "done",
"two": "dtwo",
"three": "dthree",
"four": "dfour",
}
for k, v in defaults.items():
assert o.default(k) == v
assert not o.has_changed("one")
newvals = dict(
one="xone",
two="xtwo",
three="xthree",
four="xfour",
)
o.update(**newvals)
assert o.has_changed("one")
for k, v in newvals.items():
assert v == getattr(o, k)
o.reset()
assert not o.has_changed("one")
for k in o.keys():
assert not o.has_changed(k)
def test_required_int():
o = TO()
with pytest.raises(exceptions.OptionsError):
o.parse_setval("required_int", None)
def test_deepcopy():
o = TD()
copy.deepcopy(o)
def test_options():
o = TO()
assert o.keys() == {"bool", "one", "two", "required_int"}
assert o.one is None
assert o.two == 2
o.one = 1
assert o.one == 1
with pytest.raises(TypeError):
TO(nonexistent = "value")
with pytest.raises(Exception, match="Unknown options"):
o.nonexistent = "value"
with pytest.raises(Exception, match="Unknown options"):
o.update(nonexistent = "value")
assert o.update_known(nonexistent = "value") == {"nonexistent": "value"}
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
o.changed.connect(sub)
o.one = 90
assert len(rec) == 1
assert rec[-1].one == 90
o.update(one=3)
assert len(rec) == 2
assert rec[-1].one == 3
def test_setter():
o = TO()
f = o.setter("two")
f(99)
assert o.two == 99
with pytest.raises(Exception, match="No such option"):
o.setter("nonexistent")
def test_toggler():
o = TO()
f = o.toggler("bool")
assert o.bool is False
f()
assert o.bool is True
f()
assert o.bool is False
with pytest.raises(Exception, match="No such option"):
o.toggler("nonexistent")
with pytest.raises(Exception, match="boolean options"):
o.toggler("one")
class Rec():
def __init__(self):
self.called = None
def __call__(self, *args, **kwargs):
self.called = (args, kwargs)
def test_subscribe():
o = TO()
r = Rec()
try:
o.subscribe(r, ["unknown"])
except exceptions.OptionsError:
pass
else:
raise AssertionError
assert len(o.changed.receivers) == 0
o.subscribe(r, ["two"])
o.one = 2
assert not r.called
o.two = 3
assert r.called
assert len(o.changed.receivers) == 1
del r
o.two = 4
assert len(o.changed.receivers) == 0
class binder:
def __init__(self):
self.o = TO()
self.called = False
self.o.subscribe(self.bound, ["two"])
def bound(self, *args, **kwargs):
self.called = True
t = binder()
t.o.one = 3
assert not t.called
t.o.two = 3
assert t.called
def test_rollback():
o = TO()
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
recerr = []
def errsub(opts, **kwargs):
recerr.append(kwargs)
def err(opts, updated):
if opts.one == 10:
raise exceptions.OptionsError()
if opts.bool is True:
raise exceptions.OptionsError()
o.changed.connect(sub)
o.changed.connect(err)
o.errored.connect(errsub)
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.one = 10
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.bool = True
assert o.bool is False
assert isinstance(recerr[0]["exc"], exceptions.OptionsError)
assert o.one is None
assert o.bool is False
assert len(rec) == 4
assert rec[0].one == 10
assert rec[1].one is None
assert rec[2].bool is True
assert rec[3].bool is False
with pytest.raises(exceptions.OptionsError):
with o.rollback({"one"}, reraise=True):
raise exceptions.OptionsError()
def test_simple():
assert repr(TO())
assert "one" in TO()
def test_items():
assert TO().items()
def test_serialize():
o = TD2()
o.three = "set"
assert "dfour" in optmanager.serialize(o, None, defaults=True)
data = optmanager.serialize(o, None)
assert "dfour" not in data
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
assert not o == 42
t = """
unknown: foo
"""
data = optmanager.serialize(o, t)
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
t = "invalid: foo\ninvalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "invalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "# a comment"
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
t = ""
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
def test_serialize_defaults():
o = options.Options()
assert optmanager.serialize(o, None, defaults=True)
def test_saving(tmpdir):
o = TD2()
o.three = "set"
dst = str(tmpdir.join("conf"))
optmanager.save(o, dst, defaults=True)
o2 = TD2()
optmanager.load_paths(o2, dst)
o2.three = "foo"
optmanager.save(o2, dst, defaults=True)
optmanager.load_paths(o, dst)
assert o.three == "foo"
with open(dst, 'a') as f:
f.write("foobar: '123'")
assert optmanager.load_paths(o, dst) == {"foobar": "123"}
with open(dst, 'a') as f:
f.write("'''")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with open(dst, 'wb') as f:
f.write(b"\x01\x02\x03")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
with open(dst, 'wb') as f:
f.write(b"\xff\xff\xff")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
def test_merge():
m = TM()
m.merge(dict(one="two"))
assert m.one == "two"
m.merge(dict(one=None))
assert m.one == "two"
m.merge(dict(two=["bar"]))
assert m.two == ["foo", "bar"]
def test_option():
o = optmanager._Option("test", int, 1, "help", None)
assert o.current() == 1
with pytest.raises(TypeError):
o.set("foo")
with pytest.raises(TypeError):
optmanager._Option("test", str, 1, "help", None)
o2 = optmanager._Option("test", int, 1, "help", None)
assert o2 == o
o2.set(5)
assert o2 != o
def test_dump_defaults():
o = options.Options()
assert optmanager.dump_defaults(o)
def test_dump_dicts():
o = options.Options()
assert optmanager.dump_dicts(o)
assert optmanager.dump_dicts(o, ['http2', 'listen_port'])
class TTypes(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("str", str, "str", "help")
self.add_option("optstr", typing.Optional[str], "optstr", "help", "help")
self.add_option("bool", bool, False, "help")
self.add_option("bool_on", bool, True, "help")
self.add_option("int", int, 0, "help")
self.add_option("optint", typing.Optional[int], 0, "help")
self.add_option("seqstr", typing.Sequence[str], [], "help")
self.add_option("unknown", float, 0.0, "help")
def test_make_parser():
parser = argparse.ArgumentParser()
opts = TTypes()
opts.make_parser(parser, "str", short="a")
opts.make_parser(parser, "bool", short="b")
opts.make_parser(parser, "int", short="c")
opts.make_parser(parser, "seqstr", short="d")
opts.make_parser(parser, "bool_on", short="e")
with pytest.raises(ValueError):
opts.make_parser(parser, "unknown")
# Nonexistent options ignore
opts.make_parser(parser, "nonexistentxxx")
def test_set():
opts = TTypes()
opts.set("str=foo")
assert opts.str == "foo"
with pytest.raises(TypeError):
opts.set("str")
opts.set("optstr=foo")
assert opts.optstr == "foo"
opts.set("optstr")
assert opts.optstr is None
opts.set("bool=false")
assert opts.bool is False
opts.set("bool")
assert opts.bool is True
opts.set("bool=true")
assert opts.bool is True
with pytest.raises(exceptions.OptionsError):
opts.set("bool=wobble")
opts.set("bool=toggle")
assert opts.bool is False
opts.set("bool=toggle")
assert opts.bool is True
opts.set("int=1")
assert opts.int == 1
with pytest.raises(exceptions.OptionsError):
opts.set("int=wobble")
opts.set("optint")
assert opts.optint is None
assert opts.seqstr == []
opts.set("seqstr=foo")
assert opts.seqstr == ["foo"]
opts.set("seqstr=bar")
assert opts.seqstr == ["foo", "bar"]
opts.set("seqstr")
assert opts.seqstr == []
with pytest.raises(exceptions.OptionsError):
opts.set("nonexistent=wobble")
| true
| true
|
1c49c25e5e36e3834b02920900d27d8725a5e1cf
| 1,182
|
py
|
Python
|
pets/forms/pets.py
|
IvanParvanovski/petstagram-repository
|
03f1464d4f5919712446f812fad044056f9a15f6
|
[
"MIT"
] | 2
|
2021-06-10T08:18:00.000Z
|
2021-06-12T19:10:49.000Z
|
pets/forms/pets.py
|
IvanParvanovski/petstagram-repository
|
03f1464d4f5919712446f812fad044056f9a15f6
|
[
"MIT"
] | null | null | null |
pets/forms/pets.py
|
IvanParvanovski/petstagram-repository
|
03f1464d4f5919712446f812fad044056f9a15f6
|
[
"MIT"
] | null | null | null |
from django import forms
from pets.models.pet import Pet
class PetCreateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for (_, field) in self.fields.items():
field.widget.attrs['class'] = 'form-control'
class Meta:
model = Pet
exclude = ('user', )
# widgets = {
# 'type': forms.Select(
# attrs={
# 'class': 'form-control',
# },
# ),
#
# 'name': forms.TextInput(
# attrs={
# 'class': 'form-control',
# },
# ),
#
# 'age': forms.NumberInput(
# attrs={
# 'class': 'form-control'
# },
# ),
#
# 'image_url': forms.TextInput(
# attrs={
# 'class': 'form-control'
# },
# ),
#
# 'description': forms.Textarea(
# attrs={
# 'class': 'form-control'
# },
# ),
# }
| 25.148936
| 56
| 0.358714
|
from django import forms
from pets.models.pet import Pet
class PetCreateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for (_, field) in self.fields.items():
field.widget.attrs['class'] = 'form-control'
class Meta:
model = Pet
exclude = ('user', )
| true
| true
|
1c49c279cce146a4c48b5adc7ff7367616606ace
| 2,101
|
py
|
Python
|
wagtail_commons/core/management/commands/bootstrap_users.py
|
bgrace/wagtail-commons
|
37985629e3098842c08f6ae7072c2af8a69319f0
|
[
"BSD-3-Clause"
] | 13
|
2015-03-13T06:44:47.000Z
|
2021-08-01T02:36:36.000Z
|
wagtail_commons/core/management/commands/bootstrap_users.py
|
bgrace/wagtail-commons
|
37985629e3098842c08f6ae7072c2af8a69319f0
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail_commons/core/management/commands/bootstrap_users.py
|
bgrace/wagtail-commons
|
37985629e3098842c08f6ae7072c2af8a69319f0
|
[
"BSD-3-Clause"
] | 1
|
2016-02-07T20:54:40.000Z
|
2016-02-07T20:54:40.000Z
|
from django.conf import settings
__author__ = 'brett@codigious.com'
import codecs
import os
from optparse import make_option
import yaml
import yaml.parser
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = '<content directory>'
help = 'Create users, found in <content directory>/users.yml'
option_list = BaseCommand.option_list + (
make_option('--content', dest='content_path', type='string', ),
)
def handle(self, *args, **options):
if options['content_path']:
path = options['content_path']
elif settings.BOOTSTRAP_CONTENT_DIR:
path = settings.BOOTSTRAP_CONTENT_DIR
else:
raise CommandError("Pass --content <content dir>, where <content dir>/pages contain .yml files")
if not os.path.isdir(path):
raise CommandError("Content dir '{0}' does not exist or is not a directory".format(path))
content_path = os.path.join(path, 'users.yml')
if not os.path.isfile(content_path):
raise CommandError("Could not find file '{0}'".format(content_path))
f = codecs.open(content_path, encoding='utf-8')
stream = yaml.load_all(f)
users = next(stream)
f.close()
for user in users:
try:
u = User.objects.create(username=user['username'],
email=user['email'],
first_name=user['first_name'],
last_name=user['last_name'],
is_superuser=user['is_superuser'],
is_staff=user['is_staff'])
u.set_password(user['password'])
u.save()
self.stdout.write("Created {0}".format(user['username']))
except IntegrityError:
self.stderr.write("Could not create {0}, already exists?".format(user['username']))
| 36.859649
| 108
| 0.585436
|
from django.conf import settings
__author__ = 'brett@codigious.com'
import codecs
import os
from optparse import make_option
import yaml
import yaml.parser
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = '<content directory>'
help = 'Create users, found in <content directory>/users.yml'
option_list = BaseCommand.option_list + (
make_option('--content', dest='content_path', type='string', ),
)
def handle(self, *args, **options):
if options['content_path']:
path = options['content_path']
elif settings.BOOTSTRAP_CONTENT_DIR:
path = settings.BOOTSTRAP_CONTENT_DIR
else:
raise CommandError("Pass --content <content dir>, where <content dir>/pages contain .yml files")
if not os.path.isdir(path):
raise CommandError("Content dir '{0}' does not exist or is not a directory".format(path))
content_path = os.path.join(path, 'users.yml')
if not os.path.isfile(content_path):
raise CommandError("Could not find file '{0}'".format(content_path))
f = codecs.open(content_path, encoding='utf-8')
stream = yaml.load_all(f)
users = next(stream)
f.close()
for user in users:
try:
u = User.objects.create(username=user['username'],
email=user['email'],
first_name=user['first_name'],
last_name=user['last_name'],
is_superuser=user['is_superuser'],
is_staff=user['is_staff'])
u.set_password(user['password'])
u.save()
self.stdout.write("Created {0}".format(user['username']))
except IntegrityError:
self.stderr.write("Could not create {0}, already exists?".format(user['username']))
| true
| true
|
1c49c4ae101e4f898905a971de616b3252a2ebed
| 4,978
|
py
|
Python
|
oauth2.py
|
mats-ch/ctfd-oauth
|
59d9c6bd22c69f12d909329cc94293270fb09ba6
|
[
"MIT"
] | null | null | null |
oauth2.py
|
mats-ch/ctfd-oauth
|
59d9c6bd22c69f12d909329cc94293270fb09ba6
|
[
"MIT"
] | null | null | null |
oauth2.py
|
mats-ch/ctfd-oauth
|
59d9c6bd22c69f12d909329cc94293270fb09ba6
|
[
"MIT"
] | null | null | null |
from flask import render_template, session, redirect
from flask_dance.contrib import azure, github
import flask_dance.contrib
import os
from CTFd.auth import confirm, register, reset_password, login
from CTFd.models import db, Users
from CTFd.utils import set_config
from CTFd.utils.logging import log
from CTFd.utils.security.auth import login_user, logout_user
from CTFd import utils
import boto3
import base64
from botocore.exceptions import ClientError
import json
def get_secret():
secret_name = "ctf_azure_sso"
region_name = "eu-west-1"
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
return json.loads(secret)
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return json.loads(decoded_binary_secret)
def load(app):
########################
# Plugin Configuration #
########################
aws_secret = get_secret()
authentication_url_prefix = "/auth"
oauth_client_id = aws_secret['OAUTHLOGIN_CLIENT_ID']
oauth_client_secret = aws_secret['OAUTHLOGIN_CLIENT_SECRET']
oauth_provider = "azure"
create_missing_user = True
##################
# User Functions #
##################
def retrieve_user_from_database(username):
user = Users.query.filter_by(email=username).first()
if user is not None:
log('logins', "[{date}] {ip} - " + user.name + " - OAuth2 bridged user found")
return user
def create_user(username, displayName):
with app.app_context():
user = Users(email=username, name=displayName.strip())
log('logins', "[{date}] {ip} - " + user.name + " - No OAuth2 bridged user found, creating user")
db.session.add(user)
db.session.commit()
db.session.flush()
login_user(user)
return user
def create_or_get_user(username, displayName):
user = retrieve_user_from_database(username)
if user is not None:
login_user(user)
return user
if create_missing_user:
return create_user(username, displayName)
else:
log('logins', "[{date}] {ip} - " + user.name + " - No OAuth2 bridged user found and not configured to create missing users")
return None
##########################
# Provider Configuration #
##########################
provider_blueprints = {
'azure': lambda: flask_dance.contrib.azure.make_azure_blueprint(
login_url='/azure',
client_id=oauth_client_id,
client_secret=oauth_client_secret,
redirect_url=authentication_url_prefix + "/azure/confirm"),
'github': lambda: flask_dance.contrib.github.make_github_blueprint(
login_url='/github',
client_id=oauth_client_id,
client_secret=oauth_client_secret,
redirect_url=authentication_url_prefix + "/github/confirm")
}
def get_azure_user():
user_info = flask_dance.contrib.azure.azure.get("/v1.0/me").json()
return create_or_get_user(
username=user_info["userPrincipalName"],
displayName=user_info["displayName"])
def get_github_user():
user_info = flask_dance.contrib.github.github.get("/user").json()
return create_or_get_user(
username=user_info["email"],
displayName=user_info["name"])
provider_users = {
'azure': lambda: get_azure_user(),
'github': lambda: get_github_user()
}
provider_blueprint = provider_blueprints[oauth_provider]() # Resolved lambda
#######################
# Blueprint Functions #
#######################
@provider_blueprint.route('/<string:auth_provider>/confirm', methods=['GET'])
def confirm_auth_provider(auth_provider):
if not auth_provider in provider_users:
return redirect('/')
provider_user = provider_users[oauth_provider]() # Resolved lambda
session.regenerate()
return redirect('/')
app.register_blueprint(provider_blueprint, url_prefix=authentication_url_prefix)
print(app.register_blueprint)
###############################
# Application Reconfiguration #
###############################
# ('', 204) is "No Content" code
set_config('registration_visibility', False)
app.view_functions['auth.login'] = lambda: redirect(authentication_url_prefix + "/" + oauth_provider)
app.view_functions['auth.register'] = lambda: ('', 204)
app.view_functions['auth.reset_password'] = lambda: ('', 204)
app.view_functions['auth.confirm'] = lambda: ('', 204)
| 35.557143
| 136
| 0.631579
|
from flask import render_template, session, redirect
from flask_dance.contrib import azure, github
import flask_dance.contrib
import os
from CTFd.auth import confirm, register, reset_password, login
from CTFd.models import db, Users
from CTFd.utils import set_config
from CTFd.utils.logging import log
from CTFd.utils.security.auth import login_user, logout_user
from CTFd import utils
import boto3
import base64
from botocore.exceptions import ClientError
import json
def get_secret():
secret_name = "ctf_azure_sso"
region_name = "eu-west-1"
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
return json.loads(secret)
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return json.loads(decoded_binary_secret)
def load(app):
    aws_secret = get_secret()
    authentication_url_prefix = "/auth"
    oauth_client_id = aws_secret['OAUTHLOGIN_CLIENT_ID']
    oauth_client_secret = aws_secret['OAUTHLOGIN_CLIENT_SECRET']
    oauth_provider = "azure"
    create_missing_user = True
    def retrieve_user_from_database(username):
        user = Users.query.filter_by(email=username).first()
        if user is not None:
            log('logins', "[{date}] {ip} - " + user.name + " - OAuth2 bridged user found")
        return user
    def create_user(username, displayName):
        with app.app_context():
            user = Users(email=username, name=displayName.strip())
            log('logins', "[{date}] {ip} - " + user.name + " - No OAuth2 bridged user found, creating user")
            db.session.add(user)
            db.session.commit()
            db.session.flush()
            login_user(user)
            return user
    def create_or_get_user(username, displayName):
        user = retrieve_user_from_database(username)
        if user is not None:
            login_user(user)
            return user
        if create_missing_user:
            return create_user(username, displayName)
        else:
            log('logins', "[{date}] {ip} - " + username + " - No OAuth2 bridged user found and not configured to create missing users")
            return None
    provider_blueprints = {
        'azure': lambda: flask_dance.contrib.azure.make_azure_blueprint(
            login_url='/azure',
            client_id=oauth_client_id,
            client_secret=oauth_client_secret,
            redirect_url=authentication_url_prefix + "/azure/confirm"),
        'github': lambda: flask_dance.contrib.github.make_github_blueprint(
            login_url='/github',
            client_id=oauth_client_id,
            client_secret=oauth_client_secret,
            redirect_url=authentication_url_prefix + "/github/confirm")
    }
    def get_azure_user():
        user_info = flask_dance.contrib.azure.azure.get("/v1.0/me").json()
        return create_or_get_user(
            username=user_info["userPrincipalName"],
            displayName=user_info["displayName"])
    def get_github_user():
        user_info = flask_dance.contrib.github.github.get("/user").json()
        return create_or_get_user(
            username=user_info["email"],
            displayName=user_info["name"])
    provider_users = {
        'azure': lambda: get_azure_user(),
        'github': lambda: get_github_user()
    }
    provider_blueprint = provider_blueprints[oauth_provider]()
    @provider_blueprint.route('/<string:auth_provider>/confirm', methods=['GET'])
    def confirm_auth_provider(auth_provider):
        if not auth_provider in provider_users:
            return redirect('/')
        provider_user = provider_users[oauth_provider]()
        session.regenerate()
        return redirect('/')
    app.register_blueprint(provider_blueprint, url_prefix=authentication_url_prefix)
    print(app.register_blueprint)
    set_config('registration_visibility', False)
    app.view_functions['auth.login'] = lambda: redirect(authentication_url_prefix + "/" + oauth_provider)
    app.view_functions['auth.register'] = lambda: ('', 204)
    app.view_functions['auth.reset_password'] = lambda: ('', 204)
    app.view_functions['auth.confirm'] = lambda: ('', 204)
| true
| true
|
1c49c504b6dafe6dbc71503afe31a87262f71392
| 950
|
py
|
Python
|
noggin/middleware.py
|
mscherer/noggin
|
0e3be29de02a1ba7aaf247493c5adf7d08e5f64b
|
[
"MIT"
] | null | null | null |
noggin/middleware.py
|
mscherer/noggin
|
0e3be29de02a1ba7aaf247493c5adf7d08e5f64b
|
[
"MIT"
] | null | null | null |
noggin/middleware.py
|
mscherer/noggin
|
0e3be29de02a1ba7aaf247493c5adf7d08e5f64b
|
[
"MIT"
] | null | null | null |
import python_freeipa
from flask import make_response, render_template
class IPAErrorHandler:
def __init__(self, app, error_template):
self.app = app
self.template = error_template
self.init_app()
def init_app(self):
self.app.wsgi_app = IPAWSGIMiddleware(
self.app.wsgi_app, self.get_error_response
)
def get_error_response(self, error):
self.app.logger.error(f"Uncaught IPA exception: {error}")
return make_response(render_template(self.template, error=error), 500)
class IPAWSGIMiddleware:
def __init__(self, wsgi_app, error_factory):
self.wsgi_app = wsgi_app
self.error_factory = error_factory
def __call__(self, environ, start_response):
try:
return self.wsgi_app(environ, start_response)
except python_freeipa.exceptions.FreeIPAError as e:
return self.error_factory(e)(environ, start_response)
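The same wrap-and-catch WSGI pattern can be shown without Flask or python_freeipa. The sketch below is self-contained plain WSGI; all names, the RuntimeError stand-in for FreeIPAError, and the response body are illustrative only.
class CatchAllMiddleware:
    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        try:
            return self.wsgi_app(environ, start_response)
        except RuntimeError as e:
            # Swap in an error response, mirroring error_factory above
            def error_app(environ, start_response):
                start_response("500 Internal Server Error",
                               [("Content-Type", "text/plain")])
                return [("error: %s" % e).encode("utf-8")]
            return error_app(environ, start_response)

def failing_app(environ, start_response):
    raise RuntimeError("backend unavailable")

app = CatchAllMiddleware(failing_app)
# e.g. wsgiref.simple_server.make_server("", 8000, app).serve_forever()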
| 30.645161
| 78
| 0.690526
|
import python_freeipa
from flask import make_response, render_template
class IPAErrorHandler:
def __init__(self, app, error_template):
self.app = app
self.template = error_template
self.init_app()
def init_app(self):
self.app.wsgi_app = IPAWSGIMiddleware(
self.app.wsgi_app, self.get_error_response
)
def get_error_response(self, error):
self.app.logger.error(f"Uncaught IPA exception: {error}")
return make_response(render_template(self.template, error=error), 500)
class IPAWSGIMiddleware:
def __init__(self, wsgi_app, error_factory):
self.wsgi_app = wsgi_app
self.error_factory = error_factory
def __call__(self, environ, start_response):
try:
return self.wsgi_app(environ, start_response)
except python_freeipa.exceptions.FreeIPAError as e:
return self.error_factory(e)(environ, start_response)
| true
| true
|
1c49c50fb5c3d93475f03b91a1ed2d767b508c0a
| 158
|
py
|
Python
|
Hello_World/main.py
|
sostrowski/python
|
f01ac6f7ca491e10209ce7e3c37647e08d8f90af
|
[
"MIT"
] | null | null | null |
Hello_World/main.py
|
sostrowski/python
|
f01ac6f7ca491e10209ce7e3c37647e08d8f90af
|
[
"MIT"
] | null | null | null |
Hello_World/main.py
|
sostrowski/python
|
f01ac6f7ca491e10209ce7e3c37647e08d8f90af
|
[
"MIT"
] | null | null | null |
# This program says hello and asks for my name.
print('Hello world!')
print('What is your name?')
myName = input()
print('It is good to meet you, ' + myName)
| 26.333333
| 47
| 0.689873
|
print('Hello world!')
print('What is your name?')
myName = input()
print('It is good to meet you, ' + myName)
| true
| true
|
1c49c5326968324058812cb3a486dd5da42909e9
| 4,645
|
py
|
Python
|
qa/rpc-tests/test_framework.py
|
hkaase/TestCoin
|
73c647a99e933085ecc04c1d51491eeb44a922a4
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/test_framework.py
|
hkaase/TestCoin
|
73c647a99e933085ecc04c1d51491eeb44a922a4
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/test_framework.py
|
hkaase/TestCoin
|
73c647a99e933085ecc04c1d51491eeb44a922a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2014 The Testcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-Testcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-Testcoinrpc"))
import shutil
import tempfile
import traceback
from Testcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class TestcoinTestFramework(object):
# These may be over-ridden by subclasses:
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = start_nodes(4, self.options.tmpdir)
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
wait_Testcoinds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
wait_Testcoinds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave Testcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing Testcoind/Testcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
import logging
logging.basicConfig(level=logging.DEBUG)
os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+e.message)
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.nocleanup:
print("Cleaning up")
stop_nodes(self.nodes)
wait_Testcoinds()
shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
| 33.178571
| 105
| 0.613563
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-Testcoinrpc"))
import shutil
import tempfile
import traceback
from Testcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class TestcoinTestFramework(object):
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = start_nodes(4, self.options.tmpdir)
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
assert not self.is_network_split
stop_nodes(self.nodes)
wait_Testcoinds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
assert self.is_network_split
stop_nodes(self.nodes)
wait_Testcoinds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave Testcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing Testcoind/Testcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
import logging
logging.basicConfig(level=logging.DEBUG)
os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+e.message)
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.nocleanup:
print("Cleaning up")
stop_nodes(self.nodes)
wait_Testcoinds()
shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
| true
| true
|
1c49c54ee3d9340721f261f16dae2ce932e7cc4a
| 5,228
|
py
|
Python
|
surgeo/scripts/weighted_mean.py
|
yashd94/surgeo
|
dc449b7332e143d97321bc844739840c4b0c3666
|
[
"MIT"
] | null | null | null |
surgeo/scripts/weighted_mean.py
|
yashd94/surgeo
|
dc449b7332e143d97321bc844739840c4b0c3666
|
[
"MIT"
] | null | null | null |
surgeo/scripts/weighted_mean.py
|
yashd94/surgeo
|
dc449b7332e143d97321bc844739840c4b0c3666
|
[
"MIT"
] | null | null | null |
import csv
import math
import itertools
import io
def get_weighted_mean(percentage_index_numbers,
analyzed_subject_index_numbers,
filepath_in,
filepath_out=''):
'''Gives the weighted mean of a particular data set.
Args:
filepath_in: file path of csv from which data is read
        filepath_out: file path of the text file the summary is written
            to. If blank, the summary string is returned instead.
percentage_index_numbers: tuple of index numbers of %s to use.
analyzed_subject_index_numbers: tuple of index numbers to analyze
Returns:
None, or string depending on filepath_out.
Raises:
None
'''
with open(filepath_in, 'rU') as input_csv:
# First pass, get all data and count up
csv_reader = csv.reader(input_csv)
row_1 = next(csv_reader)
# Create number/header index
name_column_index = {index: header for index, header in
enumerate(row_1)}
summed_percentages = {item: float(0) for item in
percentage_index_numbers}
#TODO better RAM usage
validated_data_rows = []
######### First filter out bad or incomplete rows
for index, row in enumerate(csv_reader, start=1):
try:
chained_index = itertools.chain(percentage_index_numbers,
analyzed_subject_index_numbers)
for positional_number in chained_index:
row_item = row[positional_number]
float(row_item)
validated_data_rows.append(row)
except ValueError:
continue
######### Sum totals
for row in validated_data_rows:
for dictionary_key in summed_percentages.keys():
row_value = row[dictionary_key]
summed_percentages[dictionary_key] += float(row_value)
######### Calculate weighted mean for each analyzed subject matter
summary_text = io.StringIO('')
for subject_index_number in analyzed_subject_index_numbers:
# Setup numbers
weighted_mean = {item: float(0) for item in
percentage_index_numbers}
weighted_stdev = {item: float(0) for item in
percentage_index_numbers}
list_of_subject_values = []
# Accumulate weighted mean
for row in validated_data_rows:
for key in weighted_mean.keys():
# row[key] is percentage
# summed_percentages[key] is aggregate percentage
# row[subject_index_number] is subject (e.g. balance, APR)
weighted_mean[key] += (float(row[key]) /
float(summed_percentages[key]) *
float(row[subject_index_number]))
list_of_subject_values.append(float(row[subject_index_number]))
# Accumulate weighted stdev
for row in validated_data_rows:
for key in weighted_mean.keys():
# row[key] is percentage
# summed_percentages[key] is aggregate percentage
# row[subject_index_number] is subject (e.g. balance, APR)
difference = (float(row[subject_index_number]) -
float(weighted_mean[key]))
difference_squared = math.pow(float(difference), 2)
weighted_stdev[key] += math.sqrt((float(row[key]) /
float(summed_percentages[key])
* difference_squared))
sample_mean = sum(list_of_subject_values) / len(list_of_subject_values)
distance_from_mean = [math.pow((value - sample_mean), 2) for value in
list_of_subject_values]
variance = sum(distance_from_mean) / len(list_of_subject_values)
sample_std_dev = math.sqrt(variance)
summary_text.write(''.join(['\n##########\n',
name_column_index[subject_index_number],
'\n##########\n',
'sample mean: ',
str(sample_mean),
'\n',
'sample standard deviation: ',
str(sample_std_dev),
'\n\n']))
for key in weighted_mean.keys():
summary_text.write(str(name_column_index[key]))
summary_text.write(' weighted mean: ')
summary_text.write(str(weighted_mean[key]))
summary_text.write(str('\n'))
summary_text.write(str(name_column_index[key]))
summary_text.write(' weighted stdev: ')
summary_text.write(str(weighted_stdev[key]))
summary_text.write('\n')
text_output = summary_text.getvalue()
summary_text.close()
if filepath_out == '':
return text_output
else:
with open(filepath_out, 'w+') as f:
f.write(text_output)
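A hedged usage sketch: build a tiny CSV (header row plus data rows) in a temp file and feed it to get_weighted_mean. The column layout and both index tuples below are illustrative, and this assumes a Python version that still accepts the 'rU' open mode used above (removed in Python 3.11).
import os
import tempfile

rows = [
    "region,pct,balance",   # header row is consumed by next(csv_reader)
    "a,0.25,100.0",
    "b,0.75,200.0",
]
fd, path = tempfile.mkstemp(suffix=".csv")
with os.fdopen(fd, "w") as f:
    f.write("\n".join(rows) + "\n")

# Column 1 holds the weighting percentage, column 2 the analyzed value;
# the weighted mean here works out to 0.25*100 + 0.75*200 = 175.
print(get_weighted_mean(percentage_index_numbers=(1,),
                        analyzed_subject_index_numbers=(2,),
                        filepath_in=path))
os.remove(path)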
| 45.068966
| 79
| 0.547437
|
import csv
import math
import itertools
import io
def get_weighted_mean(percentage_index_numbers,
analyzed_subject_index_numbers,
filepath_in,
filepath_out=''):
with open(filepath_in, 'rU') as input_csv:
csv_reader = csv.reader(input_csv)
row_1 = next(csv_reader)
name_column_index = {index: header for index, header in
enumerate(row_1)}
summed_percentages = {item: float(0) for item in
percentage_index_numbers}
validated_data_rows = []
        for index, row in enumerate(csv_reader, start=1):
            try:
                chained_index = itertools.chain(percentage_index_numbers,
                                                analyzed_subject_index_numbers)
                for positional_number in chained_index:
                    row_item = row[positional_number]
                    float(row_item)
                validated_data_rows.append(row)
            except ValueError:
                continue
        for row in validated_data_rows:
            for dictionary_key in summed_percentages.keys():
                row_value = row[dictionary_key]
                summed_percentages[dictionary_key] += float(row_value)
        summary_text = io.StringIO('')
        for subject_index_number in analyzed_subject_index_numbers:
            weighted_mean = {item: float(0) for item in
                             percentage_index_numbers}
            weighted_stdev = {item: float(0) for item in
                              percentage_index_numbers}
            list_of_subject_values = []
            for row in validated_data_rows:
                for key in weighted_mean.keys():
                    weighted_mean[key] += (float(row[key]) /
float(summed_percentages[key]) *
float(row[subject_index_number]))
list_of_subject_values.append(float(row[subject_index_number]))
for row in validated_data_rows:
for key in weighted_mean.keys():
difference = (float(row[subject_index_number]) -
float(weighted_mean[key]))
difference_squared = math.pow(float(difference), 2)
weighted_stdev[key] += math.sqrt((float(row[key]) /
float(summed_percentages[key])
* difference_squared))
sample_mean = sum(list_of_subject_values) / len(list_of_subject_values)
distance_from_mean = [math.pow((value - sample_mean), 2) for value in
list_of_subject_values]
variance = sum(distance_from_mean) / len(list_of_subject_values)
sample_std_dev = math.sqrt(variance)
summary_text.write(''.join(['\n##########\n',
name_column_index[subject_index_number],
'\n##########\n',
'sample mean: ',
str(sample_mean),
'\n',
'sample standard deviation: ',
str(sample_std_dev),
'\n\n']))
for key in weighted_mean.keys():
summary_text.write(str(name_column_index[key]))
summary_text.write(' weighted mean: ')
summary_text.write(str(weighted_mean[key]))
summary_text.write(str('\n'))
summary_text.write(str(name_column_index[key]))
summary_text.write(' weighted stdev: ')
summary_text.write(str(weighted_stdev[key]))
summary_text.write('\n')
text_output = summary_text.getvalue()
summary_text.close()
if filepath_out == '':
return text_output
else:
with open(filepath_out, 'w+') as f:
f.write(text_output)
| true
| true
|
1c49c5b532ee2b7117c40d61eb0624ccde59523b
| 17,483
|
py
|
Python
|
qa/common/trace_summary.py
|
akbargumbira/server
|
a087c141c62923b61543651eeb5f134806cbaf2d
|
[
"BSD-3-Clause"
] | null | null | null |
qa/common/trace_summary.py
|
akbargumbira/server
|
a087c141c62923b61543651eeb5f134806cbaf2d
|
[
"BSD-3-Clause"
] | null | null | null |
qa/common/trace_summary.py
|
akbargumbira/server
|
a087c141c62923b61543651eeb5f134806cbaf2d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import json
import sys
import numpy as np
FLAGS = None
def add_span(span_map, timestamps, span_name, ts_start, ts_end):
for tag in (ts_start, ts_end):
if tag not in timestamps:
raise ValueError('timestamps missing "{}": {}'.format(
tag, timestamps))
if timestamps[ts_end] < timestamps[ts_start]:
raise ValueError('end timestamp "{}" < start timestamp "{}"'.format(
ts_end, ts_start))
if span_name not in span_map:
span_map[span_name] = 0
span_map[span_name] += timestamps[ts_end] - timestamps[ts_start]
class AbstractFrontend():
@property
def filter_timestamp(self):
return None
def add_frontend_span(self, span_map, timestamps):
pass
def summarize_frontend_span(self, span_map, cnt):
return None
class HttpFrontend(AbstractFrontend):
@property
def filter_timestamp(self):
return "HTTP_RECV_START"
def add_frontend_span(self, span_map, timestamps):
if ("HTTP_RECV_START" in timestamps) and ("HTTP_SEND_END"
in timestamps):
add_span(span_map, timestamps, "HTTP_INFER", "HTTP_RECV_START",
"HTTP_SEND_END")
add_span(span_map, timestamps, "HTTP_RECV", "HTTP_RECV_START",
"HTTP_RECV_END")
add_span(span_map, timestamps, "HTTP_SEND", "HTTP_SEND_START",
"HTTP_SEND_END")
def summarize_frontend_span(self, span_map, cnt):
if "HTTP_INFER" in span_map:
res = "HTTP infer request (avg): {}us\n".format(
span_map["HTTP_INFER"] / (cnt * 1000))
res += "\tReceive (avg): {}us\n".format(span_map["HTTP_RECV"] /
(cnt * 1000))
res += "\tSend (avg): {}us\n".format(span_map["HTTP_SEND"] /
(cnt * 1000))
res += "\tOverhead (avg): {}us\n".format(
(span_map["HTTP_INFER"] - span_map["REQUEST"] -
span_map["HTTP_RECV"] - span_map["HTTP_SEND"]) / (cnt * 1000))
return res
else:
return None
class GrpcFrontend(AbstractFrontend):
@property
def filter_timestamp(self):
return "GRPC_WAITREAD_START"
def add_frontend_span(self, span_map, timestamps):
if ("GRPC_WAITREAD_START" in timestamps) and ("GRPC_SEND_END"
in timestamps):
add_span(span_map, timestamps, "GRPC_INFER", "GRPC_WAITREAD_START",
"GRPC_SEND_END")
add_span(span_map, timestamps, "GRPC_WAITREAD",
"GRPC_WAITREAD_START", "GRPC_WAITREAD_END")
add_span(span_map, timestamps, "GRPC_SEND", "GRPC_SEND_START",
"GRPC_SEND_END")
def summarize_frontend_span(self, span_map, cnt):
if "GRPC_INFER" in span_map:
res = "GRPC infer request (avg): {}us\n".format(
span_map["GRPC_INFER"] / (cnt * 1000))
res += "\tWait/Read (avg): {}us\n".format(
span_map["GRPC_WAITREAD"] / (cnt * 1000))
res += "\tSend (avg): {}us\n".format(span_map["GRPC_SEND"] /
(cnt * 1000))
res += "\tOverhead (avg): {}us\n".format(
(span_map["GRPC_INFER"] - span_map["REQUEST"] -
span_map["GRPC_WAITREAD"] - span_map["GRPC_SEND"]) /
(cnt * 1000))
return res
else:
return None
def summarize(frontend, traces):
# map from (model_name, model_version) to # of traces
model_count_map = dict()
# map from (model_name, model_version) to map of span->total time
model_span_map = dict()
# Order traces by id to be more intuitive if 'show_trace'
traces = sorted(traces, key=lambda t: t.get('id', -1))
# Filter the trace that is not for the requested frontend
match_frontend_id_set = set()
# Filter the trace that is not meaningful and group them by 'id'
filtered_traces = dict()
for trace in traces:
if "id" not in trace:
continue
# Trace without a parent must contain frontend timestamps
add_trace = False
if "parent_id" not in trace:
if frontend.filter_timestamp is None:
continue
if "timestamps" in trace:
for ts in trace["timestamps"]:
if frontend.filter_timestamp in ts["name"]:
match_frontend_id_set.add(trace["id"])
if trace["id"] in match_frontend_id_set:
add_trace = True
# Otherwise need to check whether parent is filtered
elif trace["parent_id"] in match_frontend_id_set:
match_frontend_id_set.add(trace["id"])
add_trace = True
if add_trace:
if (trace['id'] in filtered_traces.keys()):
rep_trace = filtered_traces[trace['id']]
                # Append the timestamp to the trace representing this 'id'
if "model_name" in trace:
rep_trace["model_name"] = trace["model_name"]
if "model_version" in trace:
rep_trace["model_version"] = trace["model_version"]
if "timestamps" in trace:
rep_trace["timestamps"] += trace["timestamps"]
else:
# Use this trace to represent this 'id'
if "timestamps" not in trace:
trace["timestamps"] = []
filtered_traces[trace['id']] = trace
for trace_id, trace in filtered_traces.items():
if trace_id not in match_frontend_id_set:
filtered_traces.pop(trace_id, None)
continue
timestamps = dict()
for ts in trace["timestamps"]:
timestamps[ts["name"]] = ts["ns"]
if ("REQUEST_START" in timestamps) and ("REQUEST_END" in timestamps):
key = (trace["model_name"], trace["model_version"])
if key not in model_count_map:
model_count_map[key] = 0
model_span_map[key] = dict()
model_count_map[key] += 1
frontend.add_frontend_span(model_span_map[key], timestamps)
add_span(model_span_map[key], timestamps, "REQUEST",
"REQUEST_START", "REQUEST_END")
# The tags below will be missing for ensemble model
if ("QUEUE_START" in timestamps) and ("COMPUTE_START"
in timestamps):
add_span(model_span_map[key], timestamps, "QUEUE",
"QUEUE_START", "COMPUTE_START")
if ("COMPUTE_START" in timestamps) and ("COMPUTE_END"
in timestamps):
add_span(model_span_map[key], timestamps, "COMPUTE",
"COMPUTE_START", "COMPUTE_END")
if ("COMPUTE_INPUT_END" in timestamps) and ("COMPUTE_OUTPUT_START"
in timestamps):
add_span(model_span_map[key], timestamps, "COMPUTE_INPUT",
"COMPUTE_START", "COMPUTE_INPUT_END")
add_span(model_span_map[key], timestamps, "COMPUTE_INFER",
"COMPUTE_INPUT_END", "COMPUTE_OUTPUT_START")
add_span(model_span_map[key], timestamps, "COMPUTE_OUTPUT",
"COMPUTE_OUTPUT_START", "COMPUTE_END")
if FLAGS.show_trace:
print("{} ({}):".format(trace["model_name"],
trace["model_version"]))
print("\tid: {}".format(trace["id"]))
if "parent_id" in trace:
print("\tparent id: {}".format(trace["parent_id"]))
ordered_timestamps = list()
for ts in trace["timestamps"]:
ordered_timestamps.append((ts["name"], ts["ns"]))
ordered_timestamps.sort(key=lambda tup: tup[1])
now = None
for ts in ordered_timestamps:
if now is not None:
print("\t\t{}us".format((ts[1] - now) / 1000))
print("\t{}".format(ts[0]))
now = ts[1]
for key, cnt in model_count_map.items():
model_name, model_value = key
print("Summary for {} ({}): trace count = {}".format(
model_name, model_value, cnt))
frontend_summary = frontend.summarize_frontend_span(
model_span_map[key], cnt)
if frontend_summary is not None:
print(frontend_summary)
# collect handler timeline
print("\tHandler (avg): {}us".format(model_span_map[key]["REQUEST"] /
(cnt * 1000)))
if ("QUEUE"
in model_span_map[key]) and "COMPUTE" in model_span_map[key]:
print("\t\tOverhead (avg): {}us".format(
(model_span_map[key]["REQUEST"] - model_span_map[key]["QUEUE"] -
model_span_map[key]["COMPUTE"]) / (cnt * 1000)))
print("\t\tQueue (avg): {}us".format(model_span_map[key]["QUEUE"] /
(cnt * 1000)))
print("\t\tCompute (avg): {}us".format(
model_span_map[key]["COMPUTE"] / (cnt * 1000)))
if ("COMPUTE_INPUT" in model_span_map[key]
) and "COMPUTE_OUTPUT" in model_span_map[key]:
print("\t\t\tInput (avg): {}us".format(
model_span_map[key]["COMPUTE_INPUT"] / (cnt * 1000)))
print("\t\t\tInfer (avg): {}us".format(
model_span_map[key]["COMPUTE_INFER"] / (cnt * 1000)))
print("\t\t\tOutput (avg): {}us".format(
model_span_map[key]["COMPUTE_OUTPUT"] / (cnt * 1000)))
def summarize_dataflow(traces):
# collect data flow
# - parent input
# - child input
# - ...
# - child output
# Order traces by id to be more intuitive if 'show_trace'
traces = sorted(traces, key=lambda t: t.get('id', -1))
# {3: [4, 5, 6], 4: [7]}
dataflow_parent_map = dict()
for trace in traces:
if "id" not in trace:
continue
if "parent_id" in trace:
if trace["parent_id"] not in dataflow_parent_map:
dataflow_parent_map[trace["parent_id"]] = []
dataflow_parent_map[trace["parent_id"]].append(trace["id"])
if len(dataflow_parent_map) == 0:
# print the tensors of model
first_id = find_first_id_with_tensor(traces)
if first_id != 0:
print("Data Flow:")
print_tensor_by_id(first_id, traces, 0, 0)
return
# print the tensors of ensemble
print("Data Flow:")
first_parent_id = list(dataflow_parent_map.items())[0][0]
# {3: {4: {7: None}, 5: None, 6: None}}
dataflow_tree_map = dict()
depth = [0]
append_dataflow_tensor(dataflow_tree_map,
first_parent_id,
dataflow_parent_map,
traces,
depth)
print_dataflow_tensor(dataflow_tree_map, traces, depth[0], step=0)
def append_dataflow_tensor(dataflow_tensor_map,
parent_id,
dataflow_tree_map,
traces,
depth):
if parent_id not in dataflow_tree_map:
dataflow_tensor_map[parent_id] = None
return
child_tensor_map = dict()
dataflow_tensor_map[parent_id] = child_tensor_map
depth[0] = depth[0] + 1
child_ids = dataflow_tree_map[parent_id]
for child_id in child_ids:
append_dataflow_tensor(child_tensor_map, child_id,
dataflow_tree_map, traces, depth)
def print_dataflow_tensor(dataflow_tree_map, traces, depth, step):
for parent_id in dataflow_tree_map:
print_tensor_by_id(parent_id, traces, depth, step)
if dataflow_tree_map[parent_id] is None:
continue
print_dataflow_tensor(
dataflow_tree_map[parent_id], traces, depth, step+1)
def print_tensor_by_id(id, traces, depth, step):
if id == 0:
return
tabs = "\t"*(step+1)
print("{0}{1}".format(tabs, "="*(50+8*(depth-step))))
for trace in traces:
# print model name and version
if "id" in trace and "model_name" in trace and "model_version" in trace and "timestamps" in trace and trace["id"] == id:
print("{0}Name: {1}".format(
tabs, trace["model_name"]))
print("{0}Version:{1}".format(
tabs, trace["model_version"]))
# print data
if "id" in trace and "activity" in trace:
if trace["id"] == id and trace["activity"] == "TENSOR_QUEUE_INPUT":
print("{0}{1}:".format(tabs, "QUEUE_INPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
elif trace["id"] == id and trace["activity"] == "TENSOR_BACKEND_INPUT":
print("{0}{1}:".format(tabs, "BACKEND_INPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
elif trace["id"] == id and trace["activity"] == "TENSOR_BACKEND_OUTPUT":
print("{0}{1}:".format(tabs, "BACKEND_OUTPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
print("{0}{1}".format(tabs, "="*(50+8*(depth-step))))
def find_first_id_with_tensor(traces):
for trace in traces:
if "activity" in trace and (trace["activity"] == "TENSOR_QUEUE_INPUT" or trace["activity"] == "TENSOR_BACKEND_INPUT" or trace["activity"] == "TENSOR_BACKEND_OUTPUT"):
return trace["id"]
return 0
TRITON_TYPE_TO_NUMPY = {
"BOOL": bool,
"UINT8": np.uint8,
"UINT16": np.uint16,
"UINT32": np.uint32,
"UINT64": np.uint64,
"INT8": np.int8,
"INT16": np.int16,
"INT32": np.int32,
"INT64": np.int64,
"FP16": np.float16,
"FP32": np.float32,
"FP64": np.float64,
"BYTES": np.object_
}
def get_numpy_array(tensor):
dtype = TRITON_TYPE_TO_NUMPY[tensor["dtype"]]
value = map(float, tensor["data"].split(","))
shape = map(int, tensor["shape"].split(","))
array = np.array(list(value), dtype=dtype)
array = array.reshape(list(shape))
return array
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v',
'--verbose',
action="store_true",
required=False,
default=False,
help='Enable verbose output')
parser.add_argument('-t',
'--show-trace',
action="store_true",
required=False,
default=False,
help='Show timestamps for each individual trace')
parser.add_argument('file', type=argparse.FileType('r'), nargs='+')
FLAGS = parser.parse_args()
for f in FLAGS.file:
trace_data = json.loads(f.read())
if FLAGS.verbose:
print(json.dumps(trace_data, sort_keys=True, indent=2))
# Must summarize HTTP and GRPC separately since they have
# different ways of accumulating time.
print("File: {}".format(f.name))
summarize(HttpFrontend(), trace_data)
summarize(GrpcFrontend(), trace_data)
summarize_dataflow(trace_data)
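A small hedged example of the tensor-dict shape get_numpy_array expects: comma-separated "data" and "shape" strings plus a Triton dtype name. The values are made up for illustration and this is a separate usage snippet, not part of the script's main block.
tensor = {"dtype": "FP32", "data": "1.0,2.0,3.0,4.0", "shape": "2,2"}
print(get_numpy_array(tensor))   # -> [[1. 2.] [3. 4.]]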
| 40.65814
| 174
| 0.566493
|
import argparse
import json
import sys
import numpy as np
FLAGS = None
def add_span(span_map, timestamps, span_name, ts_start, ts_end):
for tag in (ts_start, ts_end):
if tag not in timestamps:
raise ValueError('timestamps missing "{}": {}'.format(
tag, timestamps))
if timestamps[ts_end] < timestamps[ts_start]:
raise ValueError('end timestamp "{}" < start timestamp "{}"'.format(
ts_end, ts_start))
if span_name not in span_map:
span_map[span_name] = 0
span_map[span_name] += timestamps[ts_end] - timestamps[ts_start]
class AbstractFrontend():
@property
def filter_timestamp(self):
return None
def add_frontend_span(self, span_map, timestamps):
pass
def summarize_frontend_span(self, span_map, cnt):
return None
class HttpFrontend(AbstractFrontend):
@property
def filter_timestamp(self):
return "HTTP_RECV_START"
def add_frontend_span(self, span_map, timestamps):
if ("HTTP_RECV_START" in timestamps) and ("HTTP_SEND_END"
in timestamps):
add_span(span_map, timestamps, "HTTP_INFER", "HTTP_RECV_START",
"HTTP_SEND_END")
add_span(span_map, timestamps, "HTTP_RECV", "HTTP_RECV_START",
"HTTP_RECV_END")
add_span(span_map, timestamps, "HTTP_SEND", "HTTP_SEND_START",
"HTTP_SEND_END")
def summarize_frontend_span(self, span_map, cnt):
if "HTTP_INFER" in span_map:
res = "HTTP infer request (avg): {}us\n".format(
span_map["HTTP_INFER"] / (cnt * 1000))
res += "\tReceive (avg): {}us\n".format(span_map["HTTP_RECV"] /
(cnt * 1000))
res += "\tSend (avg): {}us\n".format(span_map["HTTP_SEND"] /
(cnt * 1000))
res += "\tOverhead (avg): {}us\n".format(
(span_map["HTTP_INFER"] - span_map["REQUEST"] -
span_map["HTTP_RECV"] - span_map["HTTP_SEND"]) / (cnt * 1000))
return res
else:
return None
class GrpcFrontend(AbstractFrontend):
@property
def filter_timestamp(self):
return "GRPC_WAITREAD_START"
def add_frontend_span(self, span_map, timestamps):
if ("GRPC_WAITREAD_START" in timestamps) and ("GRPC_SEND_END"
in timestamps):
add_span(span_map, timestamps, "GRPC_INFER", "GRPC_WAITREAD_START",
"GRPC_SEND_END")
add_span(span_map, timestamps, "GRPC_WAITREAD",
"GRPC_WAITREAD_START", "GRPC_WAITREAD_END")
add_span(span_map, timestamps, "GRPC_SEND", "GRPC_SEND_START",
"GRPC_SEND_END")
def summarize_frontend_span(self, span_map, cnt):
if "GRPC_INFER" in span_map:
res = "GRPC infer request (avg): {}us\n".format(
span_map["GRPC_INFER"] / (cnt * 1000))
res += "\tWait/Read (avg): {}us\n".format(
span_map["GRPC_WAITREAD"] / (cnt * 1000))
res += "\tSend (avg): {}us\n".format(span_map["GRPC_SEND"] /
(cnt * 1000))
res += "\tOverhead (avg): {}us\n".format(
(span_map["GRPC_INFER"] - span_map["REQUEST"] -
span_map["GRPC_WAITREAD"] - span_map["GRPC_SEND"]) /
(cnt * 1000))
return res
else:
return None
def summarize(frontend, traces):
    model_count_map = dict()
model_span_map = dict()
traces = sorted(traces, key=lambda t: t.get('id', -1))
match_frontend_id_set = set()
filtered_traces = dict()
for trace in traces:
if "id" not in trace:
continue
add_trace = False
if "parent_id" not in trace:
if frontend.filter_timestamp is None:
continue
if "timestamps" in trace:
for ts in trace["timestamps"]:
if frontend.filter_timestamp in ts["name"]:
match_frontend_id_set.add(trace["id"])
if trace["id"] in match_frontend_id_set:
add_trace = True
elif trace["parent_id"] in match_frontend_id_set:
match_frontend_id_set.add(trace["id"])
add_trace = True
if add_trace:
if (trace['id'] in filtered_traces.keys()):
rep_trace = filtered_traces[trace['id']]
if "model_name" in trace:
rep_trace["model_name"] = trace["model_name"]
if "model_version" in trace:
rep_trace["model_version"] = trace["model_version"]
if "timestamps" in trace:
rep_trace["timestamps"] += trace["timestamps"]
else:
if "timestamps" not in trace:
trace["timestamps"] = []
filtered_traces[trace['id']] = trace
for trace_id, trace in filtered_traces.items():
if trace_id not in match_frontend_id_set:
filtered_traces.pop(trace_id, None)
continue
timestamps = dict()
for ts in trace["timestamps"]:
timestamps[ts["name"]] = ts["ns"]
if ("REQUEST_START" in timestamps) and ("REQUEST_END" in timestamps):
key = (trace["model_name"], trace["model_version"])
if key not in model_count_map:
model_count_map[key] = 0
model_span_map[key] = dict()
model_count_map[key] += 1
frontend.add_frontend_span(model_span_map[key], timestamps)
add_span(model_span_map[key], timestamps, "REQUEST",
"REQUEST_START", "REQUEST_END")
if ("QUEUE_START" in timestamps) and ("COMPUTE_START"
in timestamps):
add_span(model_span_map[key], timestamps, "QUEUE",
"QUEUE_START", "COMPUTE_START")
if ("COMPUTE_START" in timestamps) and ("COMPUTE_END"
in timestamps):
add_span(model_span_map[key], timestamps, "COMPUTE",
"COMPUTE_START", "COMPUTE_END")
if ("COMPUTE_INPUT_END" in timestamps) and ("COMPUTE_OUTPUT_START"
in timestamps):
add_span(model_span_map[key], timestamps, "COMPUTE_INPUT",
"COMPUTE_START", "COMPUTE_INPUT_END")
add_span(model_span_map[key], timestamps, "COMPUTE_INFER",
"COMPUTE_INPUT_END", "COMPUTE_OUTPUT_START")
add_span(model_span_map[key], timestamps, "COMPUTE_OUTPUT",
"COMPUTE_OUTPUT_START", "COMPUTE_END")
if FLAGS.show_trace:
print("{} ({}):".format(trace["model_name"],
trace["model_version"]))
print("\tid: {}".format(trace["id"]))
if "parent_id" in trace:
print("\tparent id: {}".format(trace["parent_id"]))
ordered_timestamps = list()
for ts in trace["timestamps"]:
ordered_timestamps.append((ts["name"], ts["ns"]))
ordered_timestamps.sort(key=lambda tup: tup[1])
now = None
for ts in ordered_timestamps:
if now is not None:
print("\t\t{}us".format((ts[1] - now) / 1000))
print("\t{}".format(ts[0]))
now = ts[1]
for key, cnt in model_count_map.items():
model_name, model_value = key
print("Summary for {} ({}): trace count = {}".format(
model_name, model_value, cnt))
frontend_summary = frontend.summarize_frontend_span(
model_span_map[key], cnt)
if frontend_summary is not None:
print(frontend_summary)
print("\tHandler (avg): {}us".format(model_span_map[key]["REQUEST"] /
(cnt * 1000)))
if ("QUEUE"
in model_span_map[key]) and "COMPUTE" in model_span_map[key]:
print("\t\tOverhead (avg): {}us".format(
(model_span_map[key]["REQUEST"] - model_span_map[key]["QUEUE"] -
model_span_map[key]["COMPUTE"]) / (cnt * 1000)))
print("\t\tQueue (avg): {}us".format(model_span_map[key]["QUEUE"] /
(cnt * 1000)))
print("\t\tCompute (avg): {}us".format(
model_span_map[key]["COMPUTE"] / (cnt * 1000)))
if ("COMPUTE_INPUT" in model_span_map[key]
) and "COMPUTE_OUTPUT" in model_span_map[key]:
print("\t\t\tInput (avg): {}us".format(
model_span_map[key]["COMPUTE_INPUT"] / (cnt * 1000)))
print("\t\t\tInfer (avg): {}us".format(
model_span_map[key]["COMPUTE_INFER"] / (cnt * 1000)))
print("\t\t\tOutput (avg): {}us".format(
model_span_map[key]["COMPUTE_OUTPUT"] / (cnt * 1000)))
def summarize_dataflow(traces):
traces = sorted(traces, key=lambda t: t.get('id', -1))
dataflow_parent_map = dict()
for trace in traces:
if "id" not in trace:
continue
if "parent_id" in trace:
if trace["parent_id"] not in dataflow_parent_map:
dataflow_parent_map[trace["parent_id"]] = []
dataflow_parent_map[trace["parent_id"]].append(trace["id"])
if len(dataflow_parent_map) == 0:
first_id = find_first_id_with_tensor(traces)
if first_id != 0:
print("Data Flow:")
print_tensor_by_id(first_id, traces, 0, 0)
return
print("Data Flow:")
first_parent_id = list(dataflow_parent_map.items())[0][0]
dataflow_tree_map = dict()
depth = [0]
append_dataflow_tensor(dataflow_tree_map,
first_parent_id,
dataflow_parent_map,
traces,
depth)
print_dataflow_tensor(dataflow_tree_map, traces, depth[0], step=0)
def append_dataflow_tensor(dataflow_tensor_map,
parent_id,
dataflow_tree_map,
traces,
depth):
if parent_id not in dataflow_tree_map:
dataflow_tensor_map[parent_id] = None
return
child_tensor_map = dict()
dataflow_tensor_map[parent_id] = child_tensor_map
depth[0] = depth[0] + 1
child_ids = dataflow_tree_map[parent_id]
for child_id in child_ids:
append_dataflow_tensor(child_tensor_map, child_id,
dataflow_tree_map, traces, depth)
def print_dataflow_tensor(dataflow_tree_map, traces, depth, step):
for parent_id in dataflow_tree_map:
print_tensor_by_id(parent_id, traces, depth, step)
if dataflow_tree_map[parent_id] is None:
continue
print_dataflow_tensor(
dataflow_tree_map[parent_id], traces, depth, step+1)
def print_tensor_by_id(id, traces, depth, step):
if id == 0:
return
tabs = "\t"*(step+1)
print("{0}{1}".format(tabs, "="*(50+8*(depth-step))))
for trace in traces:
if "id" in trace and "model_name" in trace and "model_version" in trace and "timestamps" in trace and trace["id"] == id:
print("{0}Name: {1}".format(
tabs, trace["model_name"]))
print("{0}Version:{1}".format(
tabs, trace["model_version"]))
if "id" in trace and "activity" in trace:
if trace["id"] == id and trace["activity"] == "TENSOR_QUEUE_INPUT":
print("{0}{1}:".format(tabs, "QUEUE_INPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
elif trace["id"] == id and trace["activity"] == "TENSOR_BACKEND_INPUT":
print("{0}{1}:".format(tabs, "BACKEND_INPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
elif trace["id"] == id and trace["activity"] == "TENSOR_BACKEND_OUTPUT":
print("{0}{1}:".format(tabs, "BACKEND_OUTPUT"))
print("{0}\t{1}: {2}".format(tabs, trace["tensor"]["name"],
get_numpy_array(trace["tensor"])))
print("{0}{1}".format(tabs, "="*(50+8*(depth-step))))
def find_first_id_with_tensor(traces):
for trace in traces:
if "activity" in trace and (trace["activity"] == "TENSOR_QUEUE_INPUT" or trace["activity"] == "TENSOR_BACKEND_INPUT" or trace["activity"] == "TENSOR_BACKEND_OUTPUT"):
return trace["id"]
return 0
TRITON_TYPE_TO_NUMPY = {
"BOOL": bool,
"UINT8": np.uint8,
"UINT16": np.uint16,
"UINT32": np.uint32,
"UINT64": np.uint64,
"INT8": np.int8,
"INT16": np.int16,
"INT32": np.int32,
"INT64": np.int64,
"FP16": np.float16,
"FP32": np.float32,
"FP64": np.float64,
"BYTES": np.object_
}
def get_numpy_array(tensor):
dtype = TRITON_TYPE_TO_NUMPY[tensor["dtype"]]
value = map(float, tensor["data"].split(","))
shape = map(int, tensor["shape"].split(","))
array = np.array(list(value), dtype=dtype)
array = array.reshape(list(shape))
return array
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v',
'--verbose',
action="store_true",
required=False,
default=False,
help='Enable verbose output')
parser.add_argument('-t',
'--show-trace',
action="store_true",
required=False,
default=False,
help='Show timestamps for each individual trace')
parser.add_argument('file', type=argparse.FileType('r'), nargs='+')
FLAGS = parser.parse_args()
for f in FLAGS.file:
trace_data = json.loads(f.read())
if FLAGS.verbose:
print(json.dumps(trace_data, sort_keys=True, indent=2))
print("File: {}".format(f.name))
summarize(HttpFrontend(), trace_data)
summarize(GrpcFrontend(), trace_data)
summarize_dataflow(trace_data)
| true
| true
|
1c49c641c007103fba76421fc78571e90fbff9c1
| 1,284
|
py
|
Python
|
cpm/code/leastSquareSolver.py
|
jvc2688/cpm
|
409e9ada39fc6238a63a75fb8474a3af70410347
|
[
"MIT"
] | 1
|
2015-08-13T19:26:23.000Z
|
2015-08-13T19:26:23.000Z
|
cpm/code/leastSquareSolver.py
|
jvc2688/cpm
|
409e9ada39fc6238a63a75fb8474a3af70410347
|
[
"MIT"
] | null | null | null |
cpm/code/leastSquareSolver.py
|
jvc2688/cpm
|
409e9ada39fc6238a63a75fb8474a3af70410347
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["linear_least_squares"]
import numpy as np
from scipy import linalg
def linear_least_squares(A, y, yvar=None, l2=None):
"""
Solve a linear system as fast as possible.
:param A: ``(ndata, nbasis)``
The basis matrix.
:param y: ``(ndata)``
The observations.
:param yvar:
The observational variance of the points ``y``.
:param l2:
The L2 regularization strength. Can be a scalar or a vector (of length
``A.shape[1]``).
"""
# Incorporate the observational uncertainties.
if yvar is not None:
CiA = A / yvar[:, None]
Ciy = y / yvar[:, None]
else:
CiA = A
Ciy = y
# Compute the pre-factor.
AT = A.T
ATA = np.dot(AT, CiA)
# Incorporate any L2 regularization.
if l2 is not None:
if np.isscalar(l2):
l2 = l2 + np.zeros(A.shape[1])
ATA[np.diag_indices_from(ATA)] += l2
# Solve the equations overwriting the temporary arrays for speed.
factor = linalg.cho_factor(ATA, overwrite_a=True)
return linalg.cho_solve(factor, np.dot(AT, Ciy), overwrite_b=True)
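A quick usage sketch with synthetic data (no yvar, no regularization): recover the coefficients of y = 2*x + 1 from a Vandermonde basis. The data below is made up for illustration.
import numpy as np

x = np.linspace(0.0, 1.0, 50)
A = np.vander(x, 2)                 # basis columns: [x, 1]
y = 2.0 * x + 1.0
print(linear_least_squares(A, y))   # ~ [2. 1.]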
| 26.204082
| 78
| 0.57243
|
from __future__ import division, print_function
__all__ = ["linear_least_squares"]
import numpy as np
from scipy import linalg
def linear_least_squares(A, y, yvar=None, l2=None):
if yvar is not None:
CiA = A / yvar[:, None]
Ciy = y / yvar[:, None]
else:
CiA = A
Ciy = y
AT = A.T
ATA = np.dot(AT, CiA)
if l2 is not None:
if np.isscalar(l2):
l2 = l2 + np.zeros(A.shape[1])
ATA[np.diag_indices_from(ATA)] += l2
factor = linalg.cho_factor(ATA, overwrite_a=True)
return linalg.cho_solve(factor, np.dot(AT, Ciy), overwrite_b=True)
| true
| true
|
1c49c75a54956fe7a7977bc79452b0fbce941fc1
| 157
|
py
|
Python
|
solution/lcp06.py
|
sth4nothing/pyleetcode
|
70ac2dc55b0cbcd243b38103a96dd796538a3c05
|
[
"MIT"
] | null | null | null |
solution/lcp06.py
|
sth4nothing/pyleetcode
|
70ac2dc55b0cbcd243b38103a96dd796538a3c05
|
[
"MIT"
] | null | null | null |
solution/lcp06.py
|
sth4nothing/pyleetcode
|
70ac2dc55b0cbcd243b38103a96dd796538a3c05
|
[
"MIT"
] | null | null | null |
import math
from typing import List
class Solution:
def minCount(self, coins: List[int]) -> int:
return sum(map(lambda c:math.ceil(c/2), coins))
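Each pile of c coins takes ceil(c/2) moves when at most two coins are removed per move, so the answer is the sum of those ceilings. A quick illustrative check:
print(Solution().minCount([4, 2, 1]))  # 2 + 1 + 1 -> 4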
| 26.166667
| 55
| 0.681529
|
import math
from typing import List
class Solution:
def minCount(self, coins: List[int]) -> int:
return sum(map(lambda c:math.ceil(c/2), coins))
| true
| true
|
1c49c954463613879c282f1b71d006111054bf6c
| 4,568
|
py
|
Python
|
pype/plugins/maya/publish/validate_muster_connection.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/validate_muster_connection.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/validate_muster_connection.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
import os
import json
import appdirs
import pyblish.api
from avalon.vendor import requests
from pype.plugin import contextplugin_should_run
import pype.maya.action
class ValidateMusterConnection(pyblish.api.ContextPlugin):
"""
Validate Muster REST API Service is running and we have valid auth token
"""
label = "Validate Muster REST API Service"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
families = ["renderlayer"]
token = None
if not os.environ.get("MUSTER_REST_URL"):
active = False
actions = [pype.api.RepairAction]
def process(self, context):
# Workaround bug pyblish-base#250
if not contextplugin_should_run(self, context):
return
        # test if we have environment set (redundant as this plugin shouldn't
# be active otherwise).
try:
MUSTER_REST_URL = os.environ["MUSTER_REST_URL"]
except KeyError:
self.log.error("Muster REST API url not found.")
raise ValueError("Muster REST API url not found.")
# Load credentials
try:
self._load_credentials()
except RuntimeError:
self.log.error("invalid or missing access token")
assert self._token is not None, "Invalid or missing token"
# We have token, lets do trivial query to web api to see if we can
# connect and access token is valid.
params = {
'authToken': self._token
}
api_entry = '/api/pools/list'
response = self._requests_get(
MUSTER_REST_URL + api_entry, params=params)
assert response.status_code == 200, "invalid response from server"
assert response.json()['ResponseData'], "invalid data in response"
def _load_credentials(self):
"""
Load Muster credentials from file and set `MUSTER_USER`,
`MUSTER_PASSWORD`, `MUSTER_REST_URL` is loaded from presets.
.. todo::
Show login dialog if access token is invalid or missing.
"""
app_dir = os.path.normpath(
appdirs.user_data_dir('pype-app', 'pype')
)
file_name = 'muster_cred.json'
fpath = os.path.join(app_dir, file_name)
file = open(fpath, 'r')
muster_json = json.load(file)
self._token = muster_json.get('token', None)
if not self._token:
raise RuntimeError("Invalid access token for Muster")
file.close()
self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
if not self.MUSTER_REST_URL:
raise AttributeError("Muster REST API url not set")
@classmethod
def repair(cls, instance):
"""
Renew authentication token by logging into Muster
"""
api_url = "{}/muster/show_login".format(
os.environ["PYPE_REST_API_URL"])
cls.log.debug(api_url)
response = cls._requests_post(api_url, timeout=1)
if response.status_code != 200:
cls.log.error('Cannot show login form to Muster')
raise Exception('Cannot show login form to Muster')
def _requests_post(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
            kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL") else True  # noqa
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
            kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL") else True  # noqa
return requests.get(*args, **kwargs)
| 37.442623
| 97
| 0.637916
|
import os
import json
import appdirs
import pyblish.api
from avalon.vendor import requests
from pype.plugin import contextplugin_should_run
import pype.maya.action
class ValidateMusterConnection(pyblish.api.ContextPlugin):
label = "Validate Muster REST API Service"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
families = ["renderlayer"]
token = None
if not os.environ.get("MUSTER_REST_URL"):
active = False
actions = [pype.api.RepairAction]
def process(self, context):
if not contextplugin_should_run(self, context):
return
# be active otherwise).
try:
MUSTER_REST_URL = os.environ["MUSTER_REST_URL"]
except KeyError:
self.log.error("Muster REST API url not found.")
raise ValueError("Muster REST API url not found.")
# Load credentials
try:
self._load_credentials()
except RuntimeError:
self.log.error("invalid or missing access token")
assert self._token is not None, "Invalid or missing token"
# We have token, lets do trivial query to web api to see if we can
# connect and access token is valid.
params = {
'authToken': self._token
}
api_entry = '/api/pools/list'
response = self._requests_get(
MUSTER_REST_URL + api_entry, params=params)
assert response.status_code == 200, "invalid response from server"
assert response.json()['ResponseData'], "invalid data in response"
def _load_credentials(self):
app_dir = os.path.normpath(
appdirs.user_data_dir('pype-app', 'pype')
)
file_name = 'muster_cred.json'
fpath = os.path.join(app_dir, file_name)
file = open(fpath, 'r')
muster_json = json.load(file)
self._token = muster_json.get('token', None)
if not self._token:
raise RuntimeError("Invalid access token for Muster")
file.close()
self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
if not self.MUSTER_REST_URL:
raise AttributeError("Muster REST API url not set")
@classmethod
def repair(cls, instance):
api_url = "{}/muster/show_login".format(
os.environ["PYPE_REST_API_URL"])
cls.log.debug(api_url)
response = cls._requests_post(api_url, timeout=1)
if response.status_code != 200:
cls.log.error('Cannot show login form to Muster')
raise Exception('Cannot show login form to Muster')
def _requests_post(self, *args, **kwargs):
if 'verify' not in kwargs:
            kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL") else True  # noqa
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
if 'verify' not in kwargs:
            kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL") else True  # noqa
return requests.get(*args, **kwargs)
| true
| true
|
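A standalone sketch of the env-gated SSL-verification wrapper pattern used above (requests and its verify keyword are real; the URL and token are placeholders):
import os
import requests
def requests_get(*args, **kwargs):
    # Verify certificates unless PYPE_DONT_VERIFY_SSL is set to a non-empty value.
    kwargs.setdefault('verify', not os.getenv('PYPE_DONT_VERIFY_SSL'))
    return requests.get(*args, **kwargs)
# response = requests_get('https://muster.example.com/api/pools/list',
#                         params={'authToken': '<token>'})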
1c49c9837d339902372100015afa8dd09aa825df
| 718
|
py
|
Python
|
tests/main.py
|
deeso/json-search-replace
|
d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d
|
[
"Apache-2.0"
] | 1
|
2019-02-08T14:42:45.000Z
|
2019-02-08T14:42:45.000Z
|
tests/main.py
|
deeso/manipin-json
|
d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d
|
[
"Apache-2.0"
] | null | null | null |
tests/main.py
|
deeso/manipin-json
|
d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d
|
[
"Apache-2.0"
] | null | null | null |
from wrapper_tests.upsert_test import *
from wrapper_tests.upsertvaluedict_test import *
import os
import logging
import sys
import argparse
import signal
logging.getLogger().setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s - %(name)s] %(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
parser = argparse.ArgumentParser(
description='Unit testing for fiery snap.')
parser.add_argument('-config', type=str, default=None,
help='toml config for keys and such, see key.toml')
if __name__ == '__main__':
unittest.main()
os.kill(os.getpid(), signal.SIGKILL)
| 26.592593
| 71
| 0.721448
|
from wrapper_tests.upsert_test import *
from wrapper_tests.upsertvaluedict_test import *
import os
import logging
import sys
import argparse
import signal
logging.getLogger().setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s - %(name)s] %(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
parser = argparse.ArgumentParser(
description='Unit testing for fiery snap.')
parser.add_argument('-config', type=str, default=None,
help='toml config for keys and such, see key.toml')
if __name__ == '__main__':
unittest.main()
os.kill(os.getpid(), signal.SIGKILL)
| true
| true
|
1c49c9dd5478932c655374fad541acbfc8952eeb
| 2,650
|
py
|
Python
|
deploy/utils/predictor.py
|
Sibo2rr/PaddleClas
|
b575e002cde44631b2dfc6333f4cfe43f0d0fc81
|
[
"Apache-2.0"
] | 3
|
2021-12-16T06:59:04.000Z
|
2021-12-16T06:59:24.000Z
|
deploy/utils/predictor.py
|
hello3281/PaddleClas
|
8103f010c75ce4b4bee51ede8d057da4c6bd446a
|
[
"Apache-2.0"
] | null | null | null |
deploy/utils/predictor.py
|
hello3281/PaddleClas
|
8103f010c75ce4b4bee51ede8d057da4c6bd446a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import base64
import shutil
import cv2
import numpy as np
from paddle.inference import Config
from paddle.inference import create_predictor
class Predictor(object):
def __init__(self, args, inference_model_dir=None):
        # HALF precision predict only works when using tensorrt
if args.use_fp16 is True:
assert args.use_tensorrt is True
self.args = args
self.paddle_predictor, self.config = self.create_paddle_predictor(
args, inference_model_dir)
def predict(self, image):
raise NotImplementedError
def create_paddle_predictor(self, args, inference_model_dir=None):
if inference_model_dir is None:
inference_model_dir = args.inference_model_dir
params_file = os.path.join(inference_model_dir, "inference.pdiparams")
model_file = os.path.join(inference_model_dir, "inference.pdmodel")
config = Config(model_file, params_file)
if args.use_gpu:
config.enable_use_gpu(args.gpu_mem, 0)
else:
config.disable_gpu()
if args.enable_mkldnn:
# cache 10 different shapes for mkldnn to avoid memory leak
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
config.set_cpu_math_library_num_threads(args.cpu_num_threads)
if args.enable_profile:
config.enable_profile()
config.disable_glog_info()
config.switch_ir_optim(args.ir_optim) # default true
if args.use_tensorrt:
config.enable_tensorrt_engine(
precision_mode=Config.Precision.Half
if args.use_fp16 else Config.Precision.Float32,
max_batch_size=args.batch_size,
workspace_size=1 << 30,
min_subgraph_size=30)
config.enable_memory_optim()
# use zero copy
config.switch_use_feed_fetch_ops(False)
predictor = create_predictor(config)
return predictor, config
| 36.805556
| 78
| 0.689434
|
import os
import argparse
import base64
import shutil
import cv2
import numpy as np
from paddle.inference import Config
from paddle.inference import create_predictor
class Predictor(object):
def __init__(self, args, inference_model_dir=None):
if args.use_fp16 is True:
assert args.use_tensorrt is True
self.args = args
self.paddle_predictor, self.config = self.create_paddle_predictor(
args, inference_model_dir)
def predict(self, image):
raise NotImplementedError
def create_paddle_predictor(self, args, inference_model_dir=None):
if inference_model_dir is None:
inference_model_dir = args.inference_model_dir
params_file = os.path.join(inference_model_dir, "inference.pdiparams")
model_file = os.path.join(inference_model_dir, "inference.pdmodel")
config = Config(model_file, params_file)
if args.use_gpu:
config.enable_use_gpu(args.gpu_mem, 0)
else:
config.disable_gpu()
if args.enable_mkldnn:
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
config.set_cpu_math_library_num_threads(args.cpu_num_threads)
if args.enable_profile:
config.enable_profile()
config.disable_glog_info()
config.switch_ir_optim(args.ir_optim)
if args.use_tensorrt:
config.enable_tensorrt_engine(
precision_mode=Config.Precision.Half
if args.use_fp16 else Config.Precision.Float32,
max_batch_size=args.batch_size,
workspace_size=1 << 30,
min_subgraph_size=30)
config.enable_memory_optim()
config.switch_use_feed_fetch_ops(False)
predictor = create_predictor(config)
return predictor, config
| true
| true
|
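A hedged sketch of subclassing the Predictor above for classification inference; the handle-based I/O calls (get_input_names, get_input_handle, copy_from_cpu, run, copy_to_cpu) follow the paddle.inference predictor API, while the subclass name and input shape are assumptions:
import numpy as np
class ClsPredictor(Predictor):
    def predict(self, image):
        # image: float32 NCHW batch, e.g. np.zeros((1, 3, 224, 224), np.float32)
        names = self.paddle_predictor.get_input_names()
        self.paddle_predictor.get_input_handle(names[0]).copy_from_cpu(image)
        self.paddle_predictor.run()
        out_names = self.paddle_predictor.get_output_names()
        return self.paddle_predictor.get_output_handle(out_names[0]).copy_to_cpu()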
1c49ca0ede398431f0496d09c172189ae299c254
| 5,375
|
py
|
Python
|
pinax/apps/basic_profiles/views.py
|
skabber/pinax
|
6fdee6b7bbbb597074d45122badf3a6dd75e0b92
|
[
"MIT"
] | 2
|
2015-12-27T23:07:51.000Z
|
2016-05-09T08:57:28.000Z
|
pinax/apps/basic_profiles/views.py
|
SMiGL/pinax
|
d08b2655fe661566bd13c5c170b1a4cad9e67a1d
|
[
"MIT"
] | null | null | null |
pinax/apps/basic_profiles/views.py
|
SMiGL/pinax
|
d08b2655fe661566bd13c5c170b1a4cad9e67a1d
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseForbidden
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
#from friends.forms import InviteFriendForm
#from friends.models import FriendshipInvitation, Friendship
from basic_profiles.models import Profile
from basic_profiles.forms import ProfileForm
# # used by friend autocompletion
# from gravatar.templatetags.gravatar import gravatar
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
def profiles(request, template_name="basic_profiles/profiles.html"):
return render_to_response(template_name, {
"users": User.objects.all().order_by("-date_joined"),
}, context_instance=RequestContext(request))
def profile(request, username, template_name="basic_profiles/profile.html"):
other_user = get_object_or_404(User, username=username)
if request.user.is_authenticated():
# is_friend = Friendship.objects.are_friends(request.user, other_user)
# other_friends = Friendship.objects.friends_for_user(other_user)
if request.user == other_user:
is_me = True
else:
is_me = False
else:
# other_friends = []
# is_friend = False
is_me = False
# if is_friend:
# invite_form = None
# previous_invitations_to = None
# previous_invitations_from = None
# else:
# if request.user.is_authenticated() and request.method == "POST":
# if request.POST["action"] == "invite":
# invite_form = InviteFriendForm(request.user, request.POST)
# if invite_form.is_valid():
# invite_form.save()
# else:
# invite_form = InviteFriendForm(request.user, {
# 'to_user': username,
# 'message': ugettext("Let's be friends!"),
# })
# if request.POST["action"] == "accept": # @@@ perhaps the form should just post to friends and be redirected here
# invitation_id = request.POST["invitation"]
# try:
# invitation = FriendshipInvitation.objects.get(id=invitation_id)
# if invitation.to_user == request.user:
# invitation.accept()
# request.user.message_set.create(message=_("You have accepted the friendship request from %(from_user)s") % {'from_user': invitation.from_user})
# is_friend = True
# other_friends = Friendship.objects.friends_for_user(other_user)
# except FriendshipInvitation.DoesNotExist:
# pass
# else:
# invite_form = InviteFriendForm(request.user, {
# 'to_user': username,
# 'message': ugettext("Let's be friends!"),
# })
# previous_invitations_to = FriendshipInvitation.objects.filter(to_user=other_user, from_user=request.user)
# previous_invitations_from = FriendshipInvitation.objects.filter(to_user=request.user, from_user=other_user)
if is_me:
if request.method == "POST":
if request.POST["action"] == "update":
profile_form = ProfileForm(request.POST, instance=other_user.get_profile())
if profile_form.is_valid():
profile = profile_form.save(commit=False)
profile.user = other_user
profile.save()
else:
profile_form = ProfileForm(instance=other_user.get_profile())
else:
profile_form = ProfileForm(instance=other_user.get_profile())
else:
profile_form = None
return render_to_response(template_name, {
"profile_form": profile_form,
"is_me": is_me,
# "is_friend": is_friend,
"other_user": other_user,
# "other_friends": other_friends,
# "invite_form": invite_form,
# "previous_invitations_to": previous_invitations_to,
# "previous_invitations_from": previous_invitations_from,
}, context_instance=RequestContext(request))
# def username_autocomplete(request):
# if request.user.is_authenticated():
# q = request.GET.get("q")
# friends = Friendship.objects.friends_for_user(request.user)
# content = []
# for friendship in friends:
# if friendship["friend"].username.lower().startswith(q):
# try:
# profile = friendship["friend"].get_profile()
# entry = "%s,,%s,,%s" % (
# gravatar(friendship["friend"], 40),
# friendship["friend"].username,
# profile.location
# )
# except Profile.DoesNotExist:
# pass
# content.append(entry)
# response = HttpResponse("\n".join(content))
# else:
# response = HttpResponseForbidden()
# setattr(response, "djangologging.suppress_output", True)
# return response
| 43
| 172
| 0.614512
|
from django.conf import settings
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseForbidden
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from basic_profiles.models import Profile
from basic_profiles.forms import ProfileForm
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
def profiles(request, template_name="basic_profiles/profiles.html"):
return render_to_response(template_name, {
"users": User.objects.all().order_by("-date_joined"),
}, context_instance=RequestContext(request))
def profile(request, username, template_name="basic_profiles/profile.html"):
other_user = get_object_or_404(User, username=username)
if request.user.is_authenticated():
if request.user == other_user:
is_me = True
else:
is_me = False
else:
is_me = False
# })
# if request.POST["action"] == "accept": # @@@ perhaps the form should just post to friends and be redirected here
# invitation_id = request.POST["invitation"]
# try:
# invitation = FriendshipInvitation.objects.get(id=invitation_id)
# if invitation.to_user == request.user:
# invitation.accept()
# request.user.message_set.create(message=_("You have accepted the friendship request from %(from_user)s") % {'from_user': invitation.from_user})
# is_friend = True
# other_friends = Friendship.objects.friends_for_user(other_user)
# except FriendshipInvitation.DoesNotExist:
# pass
# else:
# invite_form = InviteFriendForm(request.user, {
# 'to_user': username,
# 'message': ugettext("Let's be friends!"),
if is_me:
if request.method == "POST":
if request.POST["action"] == "update":
profile_form = ProfileForm(request.POST, instance=other_user.get_profile())
if profile_form.is_valid():
profile = profile_form.save(commit=False)
profile.user = other_user
profile.save()
else:
profile_form = ProfileForm(instance=other_user.get_profile())
else:
profile_form = ProfileForm(instance=other_user.get_profile())
else:
profile_form = None
return render_to_response(template_name, {
"profile_form": profile_form,
"is_me": is_me,
"other_user": other_user,
}, context_instance=RequestContext(request))
| true
| true
|
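ProfileForm is imported above but not shown; a typical definition would be a ModelForm over the Profile model (a sketch under that assumption, not the original forms.py):
from django import forms
from basic_profiles.models import Profile
class ProfileForm(forms.ModelForm):
    class Meta:
        model = Profile
        exclude = ('user',)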
1c49ca865c275afff6e3b397b4fb7f0c5ba2036e
| 1,962
|
py
|
Python
|
mysite/polls/views.py
|
allentv/pycon-django-workshop
|
931c3b672882616355053f1d84432ecaacddfbfc
|
[
"MIT"
] | null | null | null |
mysite/polls/views.py
|
allentv/pycon-django-workshop
|
931c3b672882616355053f1d84432ecaacddfbfc
|
[
"MIT"
] | null | null | null |
mysite/polls/views.py
|
allentv/pycon-django-workshop
|
931c3b672882616355053f1d84432ecaacddfbfc
|
[
"MIT"
] | null | null | null |
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from .models import Choice, Question
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {
'latest_question_list': latest_question_list,
}
return render(request, 'polls_index.html', context)
# class IndexView(generic.ListView):
# template_name = 'polls_index.html'
# context_object_name = 'latest_question_list'
# def get_queryset(self):
# return Question.objects.order_by('-pub_date')[:5]
# def detail(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls_detail.html', {'question': question})
class DetailView(generic.DetailView):
model = Question
template_name = 'polls_detail.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls_detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
# def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls_results.html', {'question': question})
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls_results.html'
| 33.827586
| 82
| 0.705403
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from .models import Choice, Question
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {
'latest_question_list': latest_question_list,
}
return render(request, 'polls_index.html', context)
class DetailView(generic.DetailView):
model = Question
template_name = 'polls_detail.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls_detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
# def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls_results.html', {'question': question})
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls_results.html'
| true
| true
|
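The reverse('polls:results', ...) call above presumes a namespaced URLconf; a minimal sketch following the usual Django tutorial layout (route strings are assumptions, not taken from this file):
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
    path('', views.index, name='index'),
    path('<int:pk>/', views.DetailView.as_view(), name='detail'),
    path('<int:question_id>/vote/', views.vote, name='vote'),
    path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
]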
1c49cadd0256d125b86d00b3f7ba4dc0283c375c
| 3,458
|
py
|
Python
|
torchio/data/inference/aggregator.py
|
Jimmy2027/torchio
|
98e5f4f379e877fa20c49f93645a3d0e0834f650
|
[
"MIT"
] | null | null | null |
torchio/data/inference/aggregator.py
|
Jimmy2027/torchio
|
98e5f4f379e877fa20c49f93645a3d0e0834f650
|
[
"MIT"
] | null | null | null |
torchio/data/inference/aggregator.py
|
Jimmy2027/torchio
|
98e5f4f379e877fa20c49f93645a3d0e0834f650
|
[
"MIT"
] | null | null | null |
from typing import Tuple
import torch
import numpy as np
from ...utils import to_tuple
from ...torchio import TypeData, TypeTuple
from ..subject import Subject
class GridAggregator:
r"""Aggregate patches for dense inference.
This class is typically used to build a volume made of batches after
inference of patches extracted by a :py:class:`~torchio.data.GridSampler`.
Args:
        sample: Instance of :py:class:`~torchio.data.subject.Subject`
from which patches will be extracted (probably using a
:py:class:`~torchio.data.GridSampler`).
patch_overlap: Tuple of integers :math:`(d_o, h_o, w_o)` specifying the
overlap between patches. If a single number
:math:`n` is provided, :math:`d_o = h_o = w_o = n`.
out_channels: Number of channels in the output tensor.
.. note:: Adapted from NiftyNet. See `this NiftyNet tutorial
<https://niftynet.readthedocs.io/en/dev/window_sizes.html>`_ for more
information.
"""
def __init__(
self,
sample: Subject,
patch_overlap: TypeTuple,
out_channels: int = 1,
):
self._output_tensor = torch.zeros(out_channels, *sample.shape)
self.patch_overlap = to_tuple(patch_overlap, length=3)
@staticmethod
def _crop_batch(
patches: torch.Tensor,
location: np.ndarray,
border: Tuple[int, int, int],
) -> Tuple[TypeData, np.ndarray]:
        location = location.astype(int)  # np.int is removed in modern NumPy
batch_shape = patches.shape
spatial_shape = batch_shape[2:] # ignore batch and channels dim
num_dimensions = 3
for idx in range(num_dimensions):
location[:, idx] = location[:, idx] + border[idx]
location[:, idx + 3] = location[:, idx + 3] - border[idx]
cropped_shape = np.max(location[:, 3:6] - location[:, 0:3], axis=0)
diff = spatial_shape - cropped_shape
        left = np.floor(diff / 2).astype(int)
i_ini, j_ini, k_ini = left
i_fin, j_fin, k_fin = left + cropped_shape
batch = patches[
:, # batch dimension
:, # channels dimension
i_ini:i_fin,
j_ini:j_fin,
k_ini:k_fin,
]
return batch, location
def _ensure_output_dtype(self, tensor: torch.Tensor) -> None:
"""Make sure the output tensor type is the same as the input patches."""
if self._output_tensor.dtype != tensor.dtype:
self._output_tensor = self._output_tensor.type(tensor.dtype)
def add_batch(self, patches: torch.Tensor, locations: TypeData) -> None:
patches = patches.cpu()
self._ensure_output_dtype(patches)
location_init = np.copy(locations)
init_ones = np.ones_like(patches)
patches, _ = self._crop_batch(
patches, location_init, self.patch_overlap)
location_init = np.copy(locations)
_, locations = self._crop_batch(
init_ones, location_init, self.patch_overlap)
for patch, location in zip(patches, locations):
i_ini, j_ini, k_ini, i_fin, j_fin, k_fin = location
channels = len(patch)
for channel in range(channels):
self._output_tensor[channel, i_ini:i_fin, j_ini:j_fin, k_ini:k_fin] = patch[channel]
def get_output_tensor(self) -> torch.Tensor:
return self._output_tensor
| 39.747126
| 100
| 0.622325
|
from typing import Tuple
import torch
import numpy as np
from ...utils import to_tuple
from ...torchio import TypeData, TypeTuple
from ..subject import Subject
class GridAggregator:
def __init__(
self,
sample: Subject,
patch_overlap: TypeTuple,
out_channels: int = 1,
):
self._output_tensor = torch.zeros(out_channels, *sample.shape)
self.patch_overlap = to_tuple(patch_overlap, length=3)
@staticmethod
def _crop_batch(
patches: torch.Tensor,
location: np.ndarray,
border: Tuple[int, int, int],
) -> Tuple[TypeData, np.ndarray]:
        location = location.astype(int)
batch_shape = patches.shape
spatial_shape = batch_shape[2:]
num_dimensions = 3
for idx in range(num_dimensions):
location[:, idx] = location[:, idx] + border[idx]
location[:, idx + 3] = location[:, idx + 3] - border[idx]
cropped_shape = np.max(location[:, 3:6] - location[:, 0:3], axis=0)
diff = spatial_shape - cropped_shape
        left = np.floor(diff / 2).astype(int)
i_ini, j_ini, k_ini = left
i_fin, j_fin, k_fin = left + cropped_shape
batch = patches[
:,
:,
i_ini:i_fin,
j_ini:j_fin,
k_ini:k_fin,
]
return batch, location
def _ensure_output_dtype(self, tensor: torch.Tensor) -> None:
if self._output_tensor.dtype != tensor.dtype:
self._output_tensor = self._output_tensor.type(tensor.dtype)
def add_batch(self, patches: torch.Tensor, locations: TypeData) -> None:
patches = patches.cpu()
self._ensure_output_dtype(patches)
location_init = np.copy(locations)
init_ones = np.ones_like(patches)
patches, _ = self._crop_batch(
patches, location_init, self.patch_overlap)
location_init = np.copy(locations)
_, locations = self._crop_batch(
init_ones, location_init, self.patch_overlap)
for patch, location in zip(patches, locations):
i_ini, j_ini, k_ini, i_fin, j_fin, k_fin = location
channels = len(patch)
for channel in range(channels):
self._output_tensor[channel, i_ini:i_fin, j_ini:j_fin, k_ini:k_fin] = patch[channel]
def get_output_tensor(self) -> torch.Tensor:
return self._output_tensor
| true
| true
|
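A runnable toy check of the border-cropping arithmetic in _crop_batch above (shapes are made up: a single 8x8x8 patch location with an overlap border of 2 keeps the central 4x4x4 region):
import numpy as np
location = np.array([[0, 0, 0, 8, 8, 8]])
border = (2, 2, 2)
for idx in range(3):
    location[:, idx] += border[idx]
    location[:, idx + 3] -= border[idx]
cropped_shape = location[:, 3:6] - location[:, 0:3]
assert (cropped_shape == [4, 4, 4]).all()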
1c49cb658dbcd25048b0ba1ab66c8574e990ca81
| 1,086
|
py
|
Python
|
test/terra/pulse/de/__init__.py
|
sagarpahwa/qiskit-aer
|
77e40c8d99fd0490d85285e96f87e4905017b646
|
[
"Apache-2.0"
] | 313
|
2018-12-19T09:19:12.000Z
|
2022-03-21T18:15:41.000Z
|
test/terra/pulse/de/__init__.py
|
sagarpahwa/qiskit-aer
|
77e40c8d99fd0490d85285e96f87e4905017b646
|
[
"Apache-2.0"
] | 933
|
2018-12-21T02:56:49.000Z
|
2022-03-30T01:19:54.000Z
|
test/terra/pulse/de/__init__.py
|
sagarpahwa/qiskit-aer
|
77e40c8d99fd0490d85285e96f87e4905017b646
|
[
"Apache-2.0"
] | 313
|
2018-12-19T14:52:55.000Z
|
2022-02-28T20:20:14.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
'''
Terra tests
'''
import os
def load_tests(loader, standard_tests, pattern):
"""
test suite for unittest discovery
"""
this_dir = os.path.dirname(__file__)
if pattern in ['test*.py', '*_test.py']:
package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
standard_tests.addTests(package_tests)
elif pattern in ['profile*.py', '*_profile.py']:
loader.testMethodPrefix = 'profile'
package_tests = loader.discover(start_dir=this_dir, pattern='test*.py')
standard_tests.addTests(package_tests)
return standard_tests
| 32.909091
| 79
| 0.714549
|
import os
def load_tests(loader, standard_tests, pattern):
this_dir = os.path.dirname(__file__)
if pattern in ['test*.py', '*_test.py']:
package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
standard_tests.addTests(package_tests)
elif pattern in ['profile*.py', '*_profile.py']:
loader.testMethodPrefix = 'profile'
package_tests = loader.discover(start_dir=this_dir, pattern='test*.py')
standard_tests.addTests(package_tests)
return standard_tests
| true
| true
|
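The load_tests hook above is picked up automatically by unittest discovery; for example, from the repository root (standard unittest flags, nothing project-specific):
python -m unittest discover -s test/terra/pulse/de -p "test*.py"
python -m unittest discover -s test/terra/pulse/de -p "profile*.py"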
1c49cba02d177d2ba601cb3a59b70360782c086a
| 174,750
|
py
|
Python
|
DungeonGenerator.py
|
JaxxyIV/ALttPDoorRandomizer
|
bbad1d1d8b1020b50453b66b2d88c5fb8712be38
|
[
"MIT"
] | 42
|
2019-08-22T16:19:51.000Z
|
2022-03-30T17:39:39.000Z
|
DungeonGenerator.py
|
JaxxyIV/ALttPDoorRandomizer
|
bbad1d1d8b1020b50453b66b2d88c5fb8712be38
|
[
"MIT"
] | 48
|
2019-09-04T22:47:03.000Z
|
2022-01-13T22:16:13.000Z
|
DungeonGenerator.py
|
JaxxyIV/ALttPDoorRandomizer
|
bbad1d1d8b1020b50453b66b2d88c5fb8712be38
|
[
"MIT"
] | 35
|
2020-01-10T09:12:53.000Z
|
2022-03-23T08:22:25.000Z
|
import RaceRandom as random
import collections
import itertools
from collections import defaultdict, deque
from functools import reduce
import logging
import math
import operator as op
import time
from typing import List
from BaseClasses import DoorType, Direction, CrystalBarrier, RegionType, Polarity, PolSlot, flooded_keys, Sector
from BaseClasses import Hook, hook_from_door
from Regions import dungeon_events, flooded_keys_reverse
from Dungeons import dungeon_regions, split_region_starts
from RoomData import DoorKind
class GraphPiece:
def __init__(self):
self.hanger_info = None
self.hanger_crystal = None
self.hooks = {}
self.visited_regions = set()
self.possible_bk_locations = set()
self.pinball_used = False
# Dungeons shouldn't be generated until all entrances are appropriately accessible
def pre_validate(builder, entrance_region_names, split_dungeon, world, player):
entrance_regions = convert_regions(entrance_region_names, world, player)
excluded = {}
for region in entrance_regions:
portal = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region == region), None)
if portal and portal.destination:
excluded[region] = None
entrance_regions = [x for x in entrance_regions if x not in excluded.keys()]
proposed_map = {}
doors_to_connect = {}
all_regions = set()
bk_needed = False
bk_special = False
for sector in builder.sectors:
for door in sector.outstanding_doors:
doors_to_connect[door.name] = door
all_regions.update(sector.regions)
bk_needed = bk_needed or determine_if_bk_needed(sector, split_dungeon, world, player)
bk_special = bk_special or check_for_special(sector)
paths = determine_paths_for_dungeon(world, player, all_regions, builder.name)
dungeon, hangers, hooks = gen_dungeon_info(builder.name, builder.sectors, entrance_regions, all_regions,
proposed_map, doors_to_connect, bk_needed, bk_special, world, player)
return check_valid(builder.name, dungeon, hangers, hooks, proposed_map, doors_to_connect, all_regions,
bk_needed, bk_special, paths, entrance_regions, world, player)
def generate_dungeon(builder, entrance_region_names, split_dungeon, world, player):
stonewalls = check_for_stonewalls(builder)
sector = generate_dungeon_main(builder, entrance_region_names, split_dungeon, world, player)
for stonewall in stonewalls:
if not stonewall_valid(stonewall):
builder.pre_open_stonewalls.add(stonewall)
return sector
def check_for_stonewalls(builder):
stonewalls = set()
for sector in builder.sectors:
for door in sector.outstanding_doors:
if door.stonewall:
stonewalls.add(door)
return stonewalls
def generate_dungeon_main(builder, entrance_region_names, split_dungeon, world, player):
if builder.valid_proposal: # we made this earlier in gen, just use it
proposed_map = builder.valid_proposal
else:
proposed_map = generate_dungeon_find_proposal(builder, entrance_region_names, split_dungeon, world, player)
builder.valid_proposal = proposed_map
queue = collections.deque(proposed_map.items())
while len(queue) > 0:
a, b = queue.popleft()
connect_doors(a, b)
queue.remove((b, a))
if len(builder.sectors) == 0:
return Sector()
available_sectors = list(builder.sectors)
master_sector = available_sectors.pop()
for sub_sector in available_sectors:
master_sector.regions.extend(sub_sector.regions)
master_sector.outstanding_doors.clear()
master_sector.r_name_set = None
return master_sector
def generate_dungeon_find_proposal(builder, entrance_region_names, split_dungeon, world, player):
logger = logging.getLogger('')
name = builder.name
entrance_regions = convert_regions(entrance_region_names, world, player)
excluded = {}
for region in entrance_regions:
portal = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region == region), None)
if portal and portal.destination:
excluded[region] = None
entrance_regions = [x for x in entrance_regions if x not in excluded.keys()]
doors_to_connect = {}
all_regions = set()
bk_needed = False
bk_special = False
for sector in builder.sectors:
for door in sector.outstanding_doors:
doors_to_connect[door.name] = door
all_regions.update(sector.regions)
bk_needed = bk_needed or determine_if_bk_needed(sector, split_dungeon, world, player)
bk_special = bk_special or check_for_special(sector)
proposed_map = {}
choices_master = [[]]
depth = 0
dungeon_cache = {}
backtrack = False
itr = 0
attempt = 1
finished = False
# flag if standard and this is hyrule castle
paths = determine_paths_for_dungeon(world, player, all_regions, name)
while not finished:
# what are my choices?
itr += 1
if itr > 1000:
if attempt > 9:
raise GenerationException('Generation taking too long. Ref %s' % name)
proposed_map = {}
choices_master = [[]]
depth = 0
dungeon_cache = {}
backtrack = False
itr = 0
attempt += 1
logger.debug(f'Starting new attempt {attempt}')
if depth not in dungeon_cache.keys():
dungeon, hangers, hooks = gen_dungeon_info(name, builder.sectors, entrance_regions, all_regions, proposed_map,
doors_to_connect, bk_needed, bk_special, world, player)
dungeon_cache[depth] = dungeon, hangers, hooks
valid = check_valid(name, dungeon, hangers, hooks, proposed_map, doors_to_connect, all_regions,
bk_needed, bk_special, paths, entrance_regions, world, player)
else:
dungeon, hangers, hooks = dungeon_cache[depth]
valid = True
if valid:
if len(proposed_map) == len(doors_to_connect):
if dungeon['Origin'].pinball_used:
door = world.get_door('Skull Pinball WS', player)
room = world.get_room(door.roomIndex, player)
if room.doorList[door.doorListPos][1] == DoorKind.Trap:
room.change(door.doorListPos, DoorKind.Normal)
door.trapFlag = 0x0
door.blocked = False
finished = True
continue
prev_choices = choices_master[depth]
# make a choice
hanger, hook = make_a_choice(dungeon, hangers, hooks, prev_choices, name)
if hanger is None:
backtrack = True
else:
logger.debug(' ' * depth + "%d: Linking %s to %s", depth, hanger.name, hook.name)
proposed_map[hanger] = hook
proposed_map[hook] = hanger
last_choice = (hanger, hook)
choices_master[depth].append(last_choice)
depth += 1
choices_master.append([])
else:
backtrack = True
if backtrack:
backtrack = False
choices_master.pop()
dungeon_cache.pop(depth, None)
depth -= 1
if depth < 0:
raise GenerationException('Invalid dungeon. Ref %s' % name)
a, b = choices_master[depth][-1]
logger.debug(' ' * depth + "%d: Rescinding %s, %s", depth, a.name, b.name)
proposed_map.pop(a, None)
proposed_map.pop(b, None)
return proposed_map
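# The proposal search above is a depth-first backtracking search: each level of
# `depth` pairs one outstanding hanger door with a compatible hook, caches the
# exploration result for that depth in dungeon_cache, and rescinds the most
# recent pairing when check_valid shows the partial map can no longer be
# completed. The whole proposal restarts from scratch after 1000 fruitless
# iterations, and generation gives up with a GenerationException once ten such
# attempts have been exhausted.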
def determine_if_bk_needed(sector, split_dungeon, world, player):
if not split_dungeon:
for region in sector.regions:
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None and door.bigKey:
return True
return False
def check_for_special(sector):
for region in sector.regions:
for loc in region.locations:
if loc.forced_big_key():
return True
return False
def gen_dungeon_info(name, available_sectors, entrance_regions, all_regions, proposed_map, valid_doors, bk_needed, bk_special, world, player):
# step 1 create dungeon: Dict<DoorName|Origin, GraphPiece>
dungeon = {}
start = ExplorationState(dungeon=name)
start.big_key_special = bk_special
group_flags, door_map = find_bk_groups(name, available_sectors, proposed_map, bk_special)
bk_flag = False if world.bigkeyshuffle[player] and not bk_special else bk_needed
def exception(d):
return name == 'Skull Woods 2' and d.name == 'Skull Pinball WS'
original_state = extend_reachable_state_improved(entrance_regions, start, proposed_map, all_regions,
valid_doors, bk_flag, world, player, exception)
dungeon['Origin'] = create_graph_piece_from_state(None, original_state, original_state, proposed_map, exception)
either_crystal = True # if all hooks from the origin are either, explore all bits with either
for hook, crystal in dungeon['Origin'].hooks.items():
if crystal != CrystalBarrier.Either:
either_crystal = False
break
init_crystal = CrystalBarrier.Either if either_crystal else CrystalBarrier.Orange
hanger_set = set()
o_state_cache = {}
for sector in available_sectors:
for door in sector.outstanding_doors:
if door not in proposed_map.keys():
hanger_set.add(door)
bk_flag = group_flags[door_map[door]]
parent = door.entrance.parent_region
crystal_start = CrystalBarrier.Either if parent.crystal_switch else init_crystal
init_state = ExplorationState(crystal_start, dungeon=name)
init_state.big_key_special = start.big_key_special
o_state = extend_reachable_state_improved([parent], init_state, proposed_map, all_regions,
valid_doors, bk_flag, world, player, exception)
o_state_cache[door.name] = o_state
piece = create_graph_piece_from_state(door, o_state, o_state, proposed_map, exception)
dungeon[door.name] = piece
check_blue_states(hanger_set, dungeon, o_state_cache, proposed_map, all_regions, valid_doors,
group_flags, door_map, world, player, exception)
# catalog hooks: Dict<Hook, List<Door, Crystal, Door>>
# and hangers: Dict<Hang, List<Door>>
avail_hooks = defaultdict(list)
hangers = defaultdict(list)
for key, piece in dungeon.items():
door_hang = piece.hanger_info
if door_hang is not None:
hanger = hanger_from_door(door_hang)
hangers[hanger].append(door_hang)
for door, crystal in piece.hooks.items():
hook = hook_from_door(door)
avail_hooks[hook].append((door, crystal, door_hang))
    # thin out invalid hangers
winnow_hangers(hangers, avail_hooks)
return dungeon, hangers, avail_hooks
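# Terminology used throughout: a "hanger" is an outstanding door that still
# needs to be attached somewhere, keyed by the hook type it requires, while a
# "hook" is a door offered as an attachment point (together with the crystal
# barrier state it was reached in). winnow_hangers then drops hangers whose
# only candidate hooks belong to their own graph piece.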
def find_bk_groups(name, available_sectors, proposed_map, bk_special):
groups = {}
door_ids = {}
gid = 1
for sector in available_sectors:
if bk_special:
my_gid = None
for door in sector.outstanding_doors:
if door in proposed_map and proposed_map[door] in door_ids:
if my_gid:
merge_gid = door_ids[proposed_map[door]]
                        # use a fresh loop variable so the outer `door` is not clobbered
                        for d in door_ids.keys():
                            if door_ids[d] == merge_gid:
                                door_ids[d] = my_gid
groups[my_gid] = groups[my_gid] or groups[merge_gid]
else:
my_gid = door_ids[proposed_map[door]]
if not my_gid:
my_gid = gid
gid += 1
for door in sector.outstanding_doors:
door_ids[door] = my_gid
if my_gid not in groups.keys():
groups[my_gid] = False
for region in sector.regions:
for loc in region.locations:
if loc.forced_item and loc.item.bigkey and name in loc.item.name:
groups[my_gid] = True
else:
for door in sector.outstanding_doors:
door_ids[door] = gid
groups[gid] = False
return groups, door_ids
def check_blue_states(hanger_set, dungeon, o_state_cache, proposed_map, all_regions, valid_doors, group_flags, door_map,
world, player, exception):
not_blue = set()
not_blue.update(hanger_set)
doors_to_check = set()
doors_to_check.update(hanger_set) # doors to check, check everything on first pass
blue_hooks = []
blue_hangers = []
new_blues = True
while new_blues:
new_blues = False
for door in doors_to_check:
piece = dungeon[door.name]
for hook, crystal in piece.hooks.items():
if crystal != CrystalBarrier.Orange:
h_type = hook_from_door(hook)
if h_type not in blue_hooks:
new_blues = True
blue_hooks.append(h_type)
if piece.hanger_crystal == CrystalBarrier.Either:
h_type = hanger_from_door(piece.hanger_info)
if h_type not in blue_hangers:
new_blues = True
blue_hangers.append(h_type)
doors_to_check = set()
for door in not_blue: # am I now blue?
hang_type = hanger_from_door(door) # am I hangable on a hook?
hook_type = hook_from_door(door) # am I hookable onto a hanger?
if (hang_type in blue_hooks and not door.stonewall) or hook_type in blue_hangers:
bk_flag = group_flags[door_map[door]]
explore_blue_state(door, dungeon, o_state_cache[door.name], proposed_map, all_regions, valid_doors,
bk_flag, world, player, exception)
doors_to_check.add(door)
not_blue.difference_update(doors_to_check)
def explore_blue_state(door, dungeon, o_state, proposed_map, all_regions, valid_doors, bk_flag, world, player, exception):
parent = door.entrance.parent_region
blue_start = ExplorationState(CrystalBarrier.Blue, o_state.dungeon)
blue_start.big_key_special = o_state.big_key_special
b_state = extend_reachable_state_improved([parent], blue_start, proposed_map, all_regions, valid_doors, bk_flag,
world, player, exception)
dungeon[door.name] = create_graph_piece_from_state(door, o_state, b_state, proposed_map, exception)
def make_a_choice(dungeon, hangers, avail_hooks, prev_choices, name):
# choose a hanger
all_hooks = {}
origin = dungeon['Origin']
for key in avail_hooks.keys():
for hstuff in avail_hooks[key]:
all_hooks[hstuff[0]] = None
candidate_hangers = []
for key in hangers.keys():
candidate_hangers.extend(hangers[key])
candidate_hangers.sort(key=lambda x: x.name) # sorting to create predictable seeds
random.shuffle(candidate_hangers) # randomize if equal preference
stage_2_hangers = []
if len(prev_choices) > 0:
prev_hanger = prev_choices[0][0]
if prev_hanger in candidate_hangers:
stage_2_hangers.append(prev_hanger)
candidate_hangers.remove(prev_hanger)
hookable_hangers = collections.deque()
queue = collections.deque(candidate_hangers)
while len(queue) > 0:
c_hang = queue.popleft()
if c_hang in all_hooks.keys():
hookable_hangers.append(c_hang)
else:
stage_2_hangers.append(c_hang) # prefer hangers that are not hooks
# todo : prefer hangers with fewer hooks at some point? not sure about this
    # this prefers hangers of the fewest type - to catch problems fast
hookable_hangers = sorted(hookable_hangers, key=lambda door: len(hangers[hanger_from_door(door)]), reverse=True)
origin_hangers = []
while len(hookable_hangers) > 0:
c_hang = hookable_hangers.pop()
if c_hang in origin.hooks.keys():
origin_hangers.append(c_hang)
else:
stage_2_hangers.append(c_hang) # prefer hangers that are not hooks on the 'origin'
stage_2_hangers.extend(origin_hangers)
hook = None
next_hanger = None
while hook is None:
if len(stage_2_hangers) == 0:
return None, None
next_hanger = stage_2_hangers.pop(0)
next_hanger_type = hanger_from_door(next_hanger)
hook_candidates = []
for door, crystal, orig_hang in avail_hooks[next_hanger_type]:
if filter_choices(next_hanger, door, orig_hang, prev_choices, hook_candidates):
hook_candidates.append(door)
if len(hook_candidates) > 0:
hook_candidates.sort(key=lambda x: x.name) # sort for deterministic seeds
hook = random.choice(tuple(hook_candidates))
elif name == 'Skull Woods 2' and next_hanger.name == 'Skull Pinball WS':
continue
else:
return None, None
return next_hanger, hook
def filter_choices(next_hanger, door, orig_hang, prev_choices, hook_candidates):
if (next_hanger, door) in prev_choices or (door, next_hanger) in prev_choices:
return False
return next_hanger != door and orig_hang != next_hanger and door not in hook_candidates
def check_valid(name, dungeon, hangers, hooks, proposed_map, doors_to_connect, all_regions,
bk_needed, bk_special, paths, entrance_regions, world, player):
# evaluate if everything is still plausible
# only origin is left in the dungeon and not everything is connected
if len(dungeon.keys()) <= 1 and len(proposed_map.keys()) < len(doors_to_connect):
return False
# origin has no more hooks, but not all doors have been proposed
if not world.bigkeyshuffle[player]:
possible_bks = len(dungeon['Origin'].possible_bk_locations)
true_origin_hooks = [x for x in dungeon['Origin'].hooks.keys() if not x.bigKey or possible_bks > 0 or not bk_needed]
if len(true_origin_hooks) == 0 and len(proposed_map.keys()) < len(doors_to_connect):
return False
if len(true_origin_hooks) == 0 and bk_needed and possible_bks == 0 and len(proposed_map.keys()) == len(doors_to_connect):
return False
for key in hangers.keys():
if len(hooks[key]) > 0 and len(hangers[key]) == 0:
return False
# todo: stonewall - check that there's no hook-only that is without a matching hanger
must_hang = defaultdict(list)
all_hooks = set()
for key in hooks.keys():
for hook in hooks[key]:
all_hooks.add(hook[0])
for key in hangers.keys():
for hanger in hangers[key]:
if hanger not in all_hooks:
must_hang[key].append(hanger)
for key in must_hang.keys():
if len(must_hang[key]) > len(hooks[key]):
return False
outstanding_doors = defaultdict(list)
for d in doors_to_connect.values():
if d not in proposed_map.keys():
outstanding_doors[hook_from_door(d)].append(d)
for key in outstanding_doors.keys():
opp_key = opposite_h_type(key)
if len(outstanding_doors[key]) > 0 and len(hangers[key]) == 0 and len(hooks[opp_key]) == 0:
return False
all_visited = set()
bk_possible = not bk_needed or (world.bigkeyshuffle[player] and not bk_special)
for piece in dungeon.values():
all_visited.update(piece.visited_regions)
if not bk_possible and len(piece.possible_bk_locations) > 0:
bk_possible = True
if len(all_regions.difference(all_visited)) > 0:
return False
if not bk_possible:
return False
if not valid_paths(name, paths, entrance_regions, doors_to_connect, all_regions, proposed_map,
bk_needed, bk_special, world, player):
return False
new_hangers_found = True
accessible_hook_types = []
hanger_matching = set()
all_hangers = set()
origin_hooks = set(dungeon['Origin'].hooks.keys())
for door_hook in origin_hooks:
h_type = hook_from_door(door_hook)
if h_type not in accessible_hook_types:
accessible_hook_types.append(h_type)
while new_hangers_found:
new_hangers_found = False
for hanger_set in hangers.values():
for hanger in hanger_set:
all_hangers.add(hanger)
h_type = hanger_from_door(hanger)
if (h_type in accessible_hook_types or hanger in origin_hooks) and hanger not in hanger_matching:
new_hangers_found = True
hanger_matching.add(hanger)
matching_hooks = dungeon[hanger.name].hooks.keys()
origin_hooks.update(matching_hooks)
for door_hook in matching_hooks:
new_h_type = hook_from_door(door_hook)
if new_h_type not in accessible_hook_types:
accessible_hook_types.append(new_h_type)
return len(all_hangers.difference(hanger_matching)) == 0
def valid_paths(name, paths, entrance_regions, valid_doors, all_regions, proposed_map,
bk_needed, bk_special, world, player):
for path in paths:
if type(path) is tuple:
target = path[1]
start_regions = []
for region in all_regions:
if path[0] == region.name:
start_regions.append(region)
break
else:
target = path
start_regions = entrance_regions
if not valid_path(name, start_regions, target, valid_doors, proposed_map, all_regions,
bk_needed, bk_special, world, player):
return False
return True
def valid_path(name, starting_regions, target, valid_doors, proposed_map, all_regions,
bk_needed, bk_special, world, player):
target_regions = set()
if type(target) is not list:
for region in all_regions:
if target == region.name:
target_regions.add(region)
break
else:
for region in all_regions:
if region.name in target:
target_regions.add(region)
start = ExplorationState(dungeon=name)
start.big_key_special = bk_special
bk_flag = False if world.bigkeyshuffle[player] and not bk_special else bk_needed
def exception(d):
return name == 'Skull Woods 2' and d.name == 'Skull Pinball WS'
original_state = extend_reachable_state_improved(starting_regions, start, proposed_map, all_regions,
valid_doors, bk_flag, world, player, exception)
for exp_door in original_state.unattached_doors:
if not exp_door.door.blocked:
return True # outstanding connection possible
for target in target_regions:
if original_state.visited_at_all(target):
return True
return False # couldn't find an outstanding door or the target
def determine_required_paths(world, player):
paths = {}
for name, builder in world.dungeon_layouts[player].items():
all_regions = builder.master_sector.regions
paths[name] = determine_paths_for_dungeon(world, player, all_regions, name)
return paths
boss_path_checks = ['Eastern Boss', 'Desert Boss', 'Hera Boss', 'Tower Agahnim 1', 'PoD Boss', 'Swamp Boss',
'Skull Boss', 'Ice Boss', 'Mire Boss', 'TR Boss', 'GT Agahnim 2']
# pinball is allowed to orphan you
drop_path_checks = ['Skull Pot Circle', 'Skull Left Drop', 'Skull Back Drop', 'Sewers Rat Path']
def determine_paths_for_dungeon(world, player, all_regions, name):
all_r_names = set(x.name for x in all_regions)
paths = []
non_hole_portals = []
for portal in world.dungeon_portals[player]:
if portal.door.entrance.parent_region in all_regions:
non_hole_portals.append(portal.door.entrance.parent_region.name)
if portal.destination:
paths.append(portal.door.entrance.parent_region.name)
if world.mode[player] == 'standard' and name == 'Hyrule Castle':
paths.append('Hyrule Dungeon Cellblock')
paths.append(('Hyrule Dungeon Cellblock', 'Sanctuary'))
if world.doorShuffle[player] in ['basic'] and name == 'Thieves Town':
paths.append('Thieves Attic Window')
elif 'Thieves Attic Window' in all_r_names:
paths.append('Thieves Attic Window')
for boss in boss_path_checks:
if boss in all_r_names:
paths.append(boss)
if 'Thieves Boss' in all_r_names:
paths.append('Thieves Boss')
paths.append(('Thieves Blind\'s Cell', 'Thieves Boss'))
for drop_check in drop_path_checks:
if drop_check in all_r_names:
paths.append((drop_check, non_hole_portals))
return paths
def winnow_hangers(hangers, hooks):
removal_info = []
for hanger, door_set in hangers.items():
for door in door_set:
hook_set = hooks[hanger]
if len(hook_set) == 0:
removal_info.append((hanger, door))
else:
found_valid = False
for door_hook, crystal, orig_hanger in hook_set:
if orig_hanger != door:
found_valid = True
break
if not found_valid:
removal_info.append((hanger, door))
for hanger, door in removal_info:
hangers[hanger].remove(door)
def stonewall_valid(stonewall):
bad_door = stonewall.dest
if bad_door.blocked:
return True # great we're done with this one
loop_region = stonewall.entrance.parent_region
start_regions = [bad_door.entrance.parent_region]
if bad_door.dependents:
for dep in bad_door.dependents:
start_regions.append(dep.entrance.parent_region)
queue = deque(start_regions)
visited = set(start_regions)
while len(queue) > 0:
region = queue.popleft()
if region == loop_region:
return False # guaranteed loop
possible_entrances = list(region.entrances)
for entrance in possible_entrances:
parent = entrance.parent_region
if parent.type != RegionType.Dungeon:
return False # you can get stuck from an entrance
else:
door = entrance.door
if (door is None or (door != stonewall and not door.blocked)) and parent not in visited:
visited.add(parent)
queue.append(parent)
# we didn't find anything bad
return True
def create_graph_piece_from_state(door, o_state, b_state, proposed_map, exception):
# todo: info about dungeon events - not sure about that
graph_piece = GraphPiece()
all_unattached = {}
for exp_d in o_state.unattached_doors:
all_unattached[exp_d.door] = exp_d.crystal
for exp_d in b_state.unattached_doors:
d = exp_d.door
if d in all_unattached.keys():
if all_unattached[d] != exp_d.crystal:
if all_unattached[d] == CrystalBarrier.Orange and exp_d.crystal == CrystalBarrier.Blue:
all_unattached[d] = CrystalBarrier.Null
elif all_unattached[d] == CrystalBarrier.Blue and exp_d.crystal == CrystalBarrier.Orange:
# the swapping case
logging.getLogger('').warning('Mismatched state @ %s (o:%s b:%s)', d.name, all_unattached[d],
exp_d.crystal)
elif all_unattached[d] == CrystalBarrier.Either:
all_unattached[d] = exp_d.crystal # pessimism, and if not this, leave it alone
else:
all_unattached[exp_d.door] = exp_d.crystal
h_crystal = door.crystal if door is not None else None
for d, crystal in all_unattached.items():
        if (door is None or d != door) and (not d.blocked or exception(d)) and d not in proposed_map.keys():
graph_piece.hooks[d] = crystal
if d == door:
h_crystal = crystal
graph_piece.hanger_info = door
graph_piece.hanger_crystal = h_crystal
graph_piece.visited_regions.update(o_state.visited_blue)
graph_piece.visited_regions.update(o_state.visited_orange)
graph_piece.visited_regions.update(b_state.visited_blue)
graph_piece.visited_regions.update(b_state.visited_orange)
graph_piece.possible_bk_locations.update(filter_for_potential_bk_locations(o_state.bk_found))
graph_piece.possible_bk_locations.update(filter_for_potential_bk_locations(b_state.bk_found))
graph_piece.pinball_used = o_state.pinball_used or b_state.pinball_used
return graph_piece
def filter_for_potential_bk_locations(locations):
return [x for x in locations if
'- Big Chest' not in x.name and '- Prize' not in x.name and x.name not in dungeon_events
and not x.forced_item and x.name not in ['Agahnim 1', 'Agahnim 2']]
type_map = {
Hook.Stairs: Hook.Stairs,
Hook.North: Hook.South,
Hook.South: Hook.North,
Hook.West: Hook.East,
Hook.East: Hook.West
}
def opposite_h_type(h_type) -> Hook:
return type_map[h_type]
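# e.g. opposite_h_type(Hook.North) == Hook.South, while Hook.Stairs maps to itself.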
hang_dir_map = {
Direction.North: Hook.South,
Direction.South: Hook.North,
Direction.West: Hook.East,
Direction.East: Hook.West,
}
def hanger_from_door(door):
if door.type == DoorType.SpiralStairs:
return Hook.Stairs
if door.type in [DoorType.Normal, DoorType.Open, DoorType.StraightStairs, DoorType.Ladder]:
return hang_dir_map[door.direction]
return None
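# e.g. a Normal door facing Direction.North hangs from a Hook.South hook; door
# types outside the list above (Hole, Warp, Interior, Logical) have no hanger.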
def connect_doors(a, b):
# Return on unsupported types.
if a.type in [DoorType.Hole, DoorType.Warp, DoorType.Interior, DoorType.Logical]:
return
# Connect supported types
if a.type in [DoorType.Normal, DoorType.SpiralStairs, DoorType.Open, DoorType.StraightStairs, DoorType.Ladder]:
if a.blocked:
connect_one_way(b.entrance, a.entrance)
elif b.blocked:
connect_one_way(a.entrance, b.entrance)
else:
connect_two_way(a.entrance, b.entrance)
dep_doors, target = [], None
if len(a.dependents) > 0:
dep_doors, target = a.dependents, b
elif len(b.dependents) > 0:
dep_doors, target = b.dependents, a
if target is not None:
target_region = target.entrance.parent_region
for dep in dep_doors:
connect_simple_door(dep, target_region)
return
# If we failed to account for a type, panic
raise RuntimeError('Unknown door type ' + a.type.name)
def connect_two_way(entrance, ext):
# if these were already connected somewhere, remove the backreference
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if ext.connected_region is not None:
ext.connected_region.entrances.remove(ext)
entrance.connect(ext.parent_region)
ext.connect(entrance.parent_region)
if entrance.parent_region.dungeon:
ext.parent_region.dungeon = entrance.parent_region.dungeon
x = entrance.door
y = ext.door
if x is not None:
x.dest = y
if y is not None:
y.dest = x
def connect_one_way(entrance, ext):
# if these were already connected somewhere, remove the backreference
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if ext.connected_region is not None:
ext.connected_region.entrances.remove(ext)
entrance.connect(ext.parent_region)
if entrance.parent_region.dungeon:
ext.parent_region.dungeon = entrance.parent_region.dungeon
x = entrance.door
y = ext.door
if x is not None:
x.dest = y
if y is not None:
y.dest = x
def connect_simple_door(exit_door, region):
exit_door.entrance.connect(region)
exit_door.dest = region
special_big_key_doors = ['Hyrule Dungeon Cellblock Door', "Thieves Blind's Cell Door"]
class ExplorationState(object):
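    """Bookkeeping for exploring a dungeon: which regions were visited under
    each crystal barrier state, which doors are available, event-locked, or
    unattached, plus the location/key counts used for key-door validation."""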
def __init__(self, init_crystal=CrystalBarrier.Orange, dungeon=None):
self.unattached_doors = []
self.avail_doors = []
self.event_doors = []
self.visited_orange = []
self.visited_blue = []
self.events = set()
self.crystal = init_crystal
# key region stuff
self.door_krs = {}
# key validation stuff
self.small_doors = []
self.big_doors = []
self.opened_doors = []
self.big_key_opened = False
self.big_key_special = False
self.found_locations = []
self.ttl_locations = 0
self.used_locations = 0
self.key_locations = 0
self.used_smalls = 0
self.bk_found = set()
self.non_door_entrances = []
self.dungeon = dungeon
self.pinball_used = False
def copy(self):
ret = ExplorationState(dungeon=self.dungeon)
ret.unattached_doors = list(self.unattached_doors)
ret.avail_doors = list(self.avail_doors)
ret.event_doors = list(self.event_doors)
ret.visited_orange = list(self.visited_orange)
ret.visited_blue = list(self.visited_blue)
ret.events = set(self.events)
ret.crystal = self.crystal
ret.door_krs = self.door_krs.copy()
ret.small_doors = list(self.small_doors)
ret.big_doors = list(self.big_doors)
ret.opened_doors = list(self.opened_doors)
ret.big_key_opened = self.big_key_opened
ret.big_key_special = self.big_key_special
ret.ttl_locations = self.ttl_locations
ret.key_locations = self.key_locations
ret.used_locations = self.used_locations
ret.used_smalls = self.used_smalls
ret.found_locations = list(self.found_locations)
ret.bk_found = set(self.bk_found)
ret.non_door_entrances = list(self.non_door_entrances)
ret.dungeon = self.dungeon
ret.pinball_used = self.pinball_used
return ret
def next_avail_door(self):
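        """Pop the next door to explore and adopt its crystal state; plain
        doors are taken first, then big key doors, with flagged doors last."""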
self.avail_doors.sort(key=lambda x: 0 if x.flag else 1 if x.door.bigKey else 2)
exp_door = self.avail_doors.pop()
self.crystal = exp_door.crystal
return exp_door
def visit_region(self, region, key_region=None, key_checks=False, bk_Flag=False):
if self.crystal == CrystalBarrier.Either:
if region not in self.visited_blue:
self.visited_blue.append(region)
if region not in self.visited_orange:
self.visited_orange.append(region)
elif self.crystal == CrystalBarrier.Orange:
self.visited_orange.append(region)
elif self.crystal == CrystalBarrier.Blue:
self.visited_blue.append(region)
if region.type == RegionType.Dungeon:
for location in region.locations:
if key_checks and location not in self.found_locations:
if location.forced_item and 'Small Key' in location.item.name:
self.key_locations += 1
if location.name not in dungeon_events and '- Prize' not in location.name and location.name not in ['Agahnim 1', 'Agahnim 2']:
self.ttl_locations += 1
if location not in self.found_locations: # todo: special logic for TT Boss?
self.found_locations.append(location)
if not bk_Flag:
self.bk_found.add(location)
if location.name in dungeon_events and location.name not in self.events:
if self.flooded_key_check(location):
self.perform_event(location.name, key_region)
if location.name in flooded_keys_reverse.keys() and self.location_found(
flooded_keys_reverse[location.name]):
self.perform_event(flooded_keys_reverse[location.name], key_region)
def flooded_key_check(self, location):
if location.name not in flooded_keys.keys():
return True
return flooded_keys[location.name] in [x.name for x in self.found_locations]
def location_found(self, location_name):
for l in self.found_locations:
if l.name == location_name:
return True
return False
def perform_event(self, location_name, key_region):
self.events.add(location_name)
queue = collections.deque(self.event_doors)
while len(queue) > 0:
exp_door = queue.popleft()
if exp_door.door.req_event == location_name:
self.avail_doors.append(exp_door)
self.event_doors.remove(exp_door)
if key_region is not None:
d_name = exp_door.door.name
if d_name not in self.door_krs.keys():
self.door_krs[d_name] = key_region
def add_all_entrance_doors_check_unattached(self, region, world, player):
door_list = [x for x in get_doors(world, region, player) if x.type in [DoorType.Normal, DoorType.SpiralStairs]]
door_list.extend(get_entrance_doors(world, region, player))
for door in door_list:
if self.can_traverse(door):
if door.dest is None and not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors)
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
for entrance in region.entrances:
door = world.check_for_door(entrance.name, player)
if door is None:
self.non_door_entrances.append(entrance)
def add_all_doors_check_unattached(self, region, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.controller is not None:
door = door.controller
if door.dest is None and not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors)
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
def add_all_doors_check_proposed(self, region, proposed_map, valid_doors, flag, world, player, exception):
for door in get_doors(world, region, player):
if door.blocked and exception(door):
self.pinball_used = True
if self.can_traverse(door, exception):
if door.controller is not None:
door = door.controller
if door.dest is None and door not in proposed_map.keys() and door.name in valid_doors.keys():
if not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors, flag)
else:
other = self.find_door_in_list(door, self.unattached_doors)
if self.crystal != other.crystal:
other.crystal = CrystalBarrier.Either
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors, flag)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors, flag)
def add_all_doors_check_key_region(self, region, key_region, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
if door.name not in self.door_krs.keys():
self.door_krs[door.name] = key_region
else:
if door.name not in self.door_krs.keys():
self.door_krs[door.name] = key_region
def add_all_doors_check_keys(self, region, key_door_proposal, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.controller:
door = door.controller
if door in key_door_proposal and door not in self.opened_doors:
if not self.in_door_list(door, self.small_doors):
self.append_door_to_list(door, self.small_doors)
elif (door.bigKey or door.name in special_big_key_doors) and not self.big_key_opened:
if not self.in_door_list(door, self.big_doors):
self.append_door_to_list(door, self.big_doors)
elif door.req_event is not None and door.req_event not in self.events:
if not self.in_door_list(door, self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
def visited(self, region):
if self.crystal == CrystalBarrier.Either:
return region in self.visited_blue and region in self.visited_orange
elif self.crystal == CrystalBarrier.Orange:
return region in self.visited_orange
elif self.crystal == CrystalBarrier.Blue:
return region in self.visited_blue
return False
def visited_at_all(self, region):
return region in self.visited_blue or region in self.visited_orange
def found_forced_bk(self):
for location in self.found_locations:
if location.forced_big_key():
return True
return False
def can_traverse(self, door, exception=None):
if door.blocked:
return exception(door) if exception else False
if door.crystal not in [CrystalBarrier.Null, CrystalBarrier.Either]:
return self.crystal == CrystalBarrier.Either or door.crystal == self.crystal
return True
def count_locations_exclude_specials(self):
cnt = 0
for loc in self.found_locations:
if '- Big Chest' not in loc.name and '- Prize' not in loc.name and loc.name not in dungeon_events and not loc.forced_item:
cnt += 1
return cnt
def validate(self, door, region, world, player):
return self.can_traverse(door) and not self.visited(region) and valid_region_to_explore(region, self.dungeon,
world, player)
def in_door_list(self, door, door_list):
for d in door_list:
if d.door == door and d.crystal == self.crystal:
return True
return False
@staticmethod
def in_door_list_ic(door, door_list):
for d in door_list:
if d.door == door:
return True
return False
@staticmethod
def find_door_in_list(door, door_list):
for d in door_list:
if d.door == door:
return d
return None
def append_door_to_list(self, door, door_list, flag=False):
if door.crystal == CrystalBarrier.Null:
door_list.append(ExplorableDoor(door, self.crystal, flag))
else:
door_list.append(ExplorableDoor(door, door.crystal, flag))
def key_door_sort(self, d):
if d.door.smallKey:
if d.door in self.opened_doors:
return 1
else:
return 0
return 2
class ExplorableDoor(object):
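    """A door queued for exploration, paired with the crystal state it was
    reached in and a flag propagated past big key doors (used to exclude
    locations behind them from big-key placement)."""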
def __init__(self, door, crystal, flag):
self.door = door
self.crystal = crystal
self.flag = flag
def __str__(self):
return str(self.__unicode__())
def __unicode__(self):
return '%s (%s)' % (self.door.name, self.crystal.name)
def extend_reachable_state_improved(search_regions, state, proposed_map, all_regions, valid_doors, bk_flag, world, player, exception):
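    """Explore outward from search_regions under the proposed door mapping and
    return a copy of state extended with everything reachable; while bk_flag is
    set, big key doors stay shut until a big key location has been found."""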
local_state = state.copy()
for region in search_regions:
local_state.visit_region(region)
local_state.add_all_doors_check_proposed(region, proposed_map, valid_doors, False, world, player, exception)
while len(local_state.avail_doors) > 0:
explorable_door = local_state.next_avail_door()
if explorable_door.door.bigKey:
if bk_flag:
big_not_found = not special_big_key_found(local_state) if local_state.big_key_special else local_state.count_locations_exclude_specials() == 0
if big_not_found:
continue # we can't open this door
if explorable_door.door in proposed_map:
connect_region = world.get_entrance(proposed_map[explorable_door.door].name, player).parent_region
else:
connect_region = world.get_entrance(explorable_door.door.name, player).connected_region
if connect_region is not None:
if valid_region_to_explore_in_regions(connect_region, all_regions, world, player) and not local_state.visited(
connect_region):
flag = explorable_door.flag or explorable_door.door.bigKey
local_state.visit_region(connect_region, bk_Flag=flag)
local_state.add_all_doors_check_proposed(connect_region, proposed_map, valid_doors, flag, world, player, exception)
return local_state
def special_big_key_found(state):
for location in state.found_locations:
if location.forced_item and location.forced_item.bigkey:
return True
return False
def valid_region_to_explore_in_regions(region, all_regions, world, player):
if region is None:
return False
return (region.type == RegionType.Dungeon and region in all_regions)\
or region.name in world.inaccessible_regions[player]\
or (region.name == 'Hyrule Castle Ledge' and world.mode[player] == 'standard')
# cross-utility methods
def valid_region_to_explore(region, name, world, player):
if region is None:
return False
return (region.type == RegionType.Dungeon and region.dungeon.name in name)\
or region.name in world.inaccessible_regions[player]\
or (region.name == 'Hyrule Castle Ledge' and world.mode[player] == 'standard')
def get_doors(world, region, player):
res = []
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None:
res.append(door)
return res
def get_dungeon_doors(region, world, player):
res = []
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None and ext.parent_region.type == RegionType.Dungeon:
res.append(door)
return res
def get_entrance_doors(world, region, player):
res = []
for ext in region.entrances:
door = world.check_for_door(ext.name, player)
if door is not None:
res.append(door)
return res
def convert_regions(region_names, world, player):
region_list = []
for name in region_names:
region_list.append(world.get_region(name, player))
return region_list
# Begin crossed mode sector shuffle
class DungeonBuilder(object):
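    """Accumulates sectors for one dungeon during the cross-dungeon shuffle,
    tracking running totals (locations, key drops, branches, dead ends, crystal
    switch needs, connection balance) used to validate the assignment."""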
def __init__(self, name):
self.name = name
self.sectors = []
self.location_cnt = 0
self.key_drop_cnt = 0
self.bk_required = False
self.bk_provided = False
self.c_switch_required = False
self.c_switch_present = False
self.c_locked = False
self.dead_ends = 0
self.branches = 0
self.forced_loops = 0
self.total_conn_lack = 0
self.conn_needed = defaultdict(int)
self.conn_supplied = defaultdict(int)
self.conn_balance = defaultdict(int)
self.mag_needed = {}
self.unfulfilled = defaultdict(int)
self.all_entrances = None # used for sector segregation/branching
self.entrance_list = None # used for overworld accessibility
self.layout_starts = None # used for overworld accessibility
self.master_sector = None
self.path_entrances = None # used for pathing/key doors, I think
self.split_flag = False
self.pre_open_stonewalls = set() # used by stonewall system
self.candidates = None
self.key_doors_num = None
self.combo_size = None
self.flex = 0
self.key_door_proposal = None
self.allowance = None
if 'Stonewall' in name:
self.allowance = 1
elif 'Prewall' in name:
orig_name = name[:-8]
if orig_name in dungeon_dead_end_allowance.keys():
self.allowance = dungeon_dead_end_allowance[orig_name]
if self.allowance is None:
self.allowance = 1
self.valid_proposal = None
self.split_dungeon_map = None
self.exception_list = []
def polarity_complement(self):
pol = Polarity()
for sector in self.sectors:
pol += sector.polarity()
return pol.complement()
def polarity(self):
pol = Polarity()
for sector in self.sectors:
pol += sector.polarity()
return pol
def __str__(self):
return str(self.__unicode__())
def __unicode__(self):
return '%s' % self.name
def simple_dungeon_builder(name, sector_list):
define_sector_features(sector_list)
builder = DungeonBuilder(name)
dummy_pool = dict.fromkeys(sector_list)
global_pole = GlobalPolarity(dummy_pool)
for sector in sector_list:
assign_sector(sector, builder, dummy_pool, global_pole)
return builder
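# A minimal usage sketch (the region names below are placeholders, not taken
# from this file): feed one dungeon's pre-split sectors to the helper above
# and it performs feature detection and sector assignment in one shot.
#   sectors = [find_sector(r, all_sectors) for r in placeholder_region_names]
#   builder = simple_dungeon_builder('Hyrule Castle', sectors)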
def create_dungeon_builders(all_sectors, connections_tuple, world, player,
dungeon_entrances=None, split_dungeon_entrances=None):
logger = logging.getLogger('')
logger.info('Shuffling Dungeon Sectors')
if dungeon_entrances is None:
dungeon_entrances = default_dungeon_entrances
if split_dungeon_entrances is None:
split_dungeon_entrances = split_region_starts
define_sector_features(all_sectors)
finished, dungeon_map, attempts = False, {}, 0
while not finished:
candidate_sectors = dict.fromkeys(all_sectors)
global_pole = GlobalPolarity(candidate_sectors)
dungeon_map = {}
for key in dungeon_regions.keys():
dungeon_map[key] = DungeonBuilder(key)
for key in dungeon_boss_sectors.keys():
current_dungeon = dungeon_map[key]
for r_name in dungeon_boss_sectors[key]:
assign_sector(find_sector(r_name, candidate_sectors), current_dungeon, candidate_sectors, global_pole)
if key == 'Hyrule Castle' and world.mode[player] == 'standard':
for r_name in ['Hyrule Dungeon Cellblock', 'Sanctuary']: # need to deliver zelda
assign_sector(find_sector(r_name, candidate_sectors), current_dungeon,
candidate_sectors, global_pole)
entrances_map, potentials, connections = connections_tuple
accessible_sectors, reverse_d_map = set(), {}
for key in dungeon_entrances.keys():
current_dungeon = dungeon_map[key]
current_dungeon.all_entrances = dungeon_entrances[key]
for r_name in current_dungeon.all_entrances:
sector = find_sector(r_name, candidate_sectors)
assign_sector(sector, current_dungeon, candidate_sectors, global_pole)
if r_name in entrances_map[key]:
if sector:
accessible_sectors.add(sector)
else:
if not sector:
sector = find_sector(r_name, all_sectors)
reverse_d_map[sector] = key
if world.mode[player] == 'standard':
current_dungeon = dungeon_map['Hyrule Castle']
standard_stair_check(dungeon_map, current_dungeon, candidate_sectors, global_pole)
complete_dungeons = {x: y for x, y in dungeon_map.items() if sum(len(sector.outstanding_doors) for sector in y.sectors) <= 0}
[dungeon_map.pop(key) for key in complete_dungeons.keys()]
# categorize sectors
identify_destination_sectors(accessible_sectors, reverse_d_map, dungeon_map, connections,
dungeon_entrances, split_dungeon_entrances)
for name, builder in dungeon_map.items():
calc_allowance_and_dead_ends(builder, connections_tuple, world, player)
if world.mode[player] == 'open' and world.shuffle[player] not in ['crossed', 'insanity']:
sanc = find_sector('Sanctuary', candidate_sectors)
            if sanc:  # only run if sanc is a candidate
lw_builders = []
for name, portal_list in dungeon_portals.items():
for portal_name in portal_list:
if world.get_portal(portal_name, player).light_world:
lw_builders.append(dungeon_map[name])
break
# portals only - not drops for mirror stuff
sanc_builder = random.choice(lw_builders)
assign_sector(sanc, sanc_builder, candidate_sectors, global_pole)
free_location_sectors = {}
crystal_switches = {}
crystal_barriers = {}
polarized_sectors = {}
neutral_sectors = {}
for sector in candidate_sectors:
if sector.chest_locations > 0:
free_location_sectors[sector] = None
elif sector.c_switch:
crystal_switches[sector] = None
elif sector.blue_barrier:
crystal_barriers[sector] = None
elif sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
assign_location_sectors(dungeon_map, free_location_sectors, global_pole)
leftover = assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers, global_pole)
ensure_crystal_switches_reachable(dungeon_map, leftover, polarized_sectors, crystal_barriers, global_pole)
for sector in leftover:
if sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
# blue barriers
assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole)
try:
# polarity:
if not global_pole.is_valid(dungeon_map):
# restart
                raise NeutralizingException('Either the free-location or crystal assignment is already globally invalid')
logger.info(world.fish.translate("cli", "cli", "balance.doors"))
builder_info = dungeon_entrances, split_dungeon_entrances, connections_tuple, world, player
assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info)
# the rest
assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info)
dungeon_map.update(complete_dungeons)
finished = True
except (NeutralizingException, GenerationException) as e:
attempts += 1
logger.debug(f'Attempt {attempts} failed with {str(e)}')
if attempts >= 10:
raise Exception('Could not find a valid seed quickly, something is likely horribly wrong.', e)
return dungeon_map
def standard_stair_check(dungeon_map, dungeon, candidate_sectors, global_pole):
    # this is because there must be at least one non-dead-end stairway in HC to get out
    # this check may not be necessary
filtered_sectors = [x for x in candidate_sectors if any(y for y in x.outstanding_doors if not y.dead and y.type == DoorType.SpiralStairs)]
valid = False
while not valid:
chosen_sector = random.choice(filtered_sectors)
filtered_sectors.remove(chosen_sector)
valid = global_pole.is_valid_choice(dungeon_map, dungeon, [chosen_sector])
if valid:
assign_sector(chosen_sector, dungeon, candidate_sectors, global_pole)
def identify_destination_sectors(accessible_sectors, reverse_d_map, dungeon_map, connections, dungeon_entrances, split_dungeon_entrances):
accessible_overworld, found_connections, explored = set(), set(), False
while not explored:
explored = True
for ent_name, region in connections.items():
if ent_name in found_connections:
continue
sector = find_sector(ent_name, reverse_d_map.keys())
if sector in accessible_sectors:
found_connections.add(ent_name)
accessible_overworld.add(region) # todo: drops don't give ow access
explored = False
elif region in accessible_overworld:
found_connections.add(ent_name)
accessible_sectors.add(sector)
explored = False
else:
d_name = reverse_d_map[sector]
if d_name not in split_dungeon_entrances:
for r_name in dungeon_entrances[d_name]:
ent_sector = find_sector(r_name, dungeon_map[d_name].sectors)
if ent_sector in accessible_sectors and ent_name not in dead_entrances:
sector.destination_entrance = True
found_connections.add(ent_name)
accessible_sectors.add(sector)
accessible_overworld.add(region)
explored = False
break
elif d_name in split_dungeon_entrances.keys():
split_section = None
for split_name, split_list in split_dungeon_entrances[d_name].items():
if ent_name in split_list:
split_section = split_name
break
if split_section:
for r_name in split_dungeon_entrances[d_name][split_section]:
ent_sector = find_sector(r_name, dungeon_map[d_name].sectors)
if ent_sector in accessible_sectors and ent_name not in dead_entrances:
sector.destination_entrance = True
found_connections.add(ent_name)
accessible_sectors.add(sector)
accessible_overworld.add(region)
explored = False
break
# todo: split version that adds allowance for potential entrances
def calc_allowance_and_dead_ends(builder, connections_tuple, world, player):
portals = world.dungeon_portals[player]
entrances_map, potentials, connections = connections_tuple
name = builder.name if not builder.split_flag else builder.name.rsplit(' ', 1)[0]
needed_connections = [x for x in builder.all_entrances if x not in entrances_map[name]]
starting_allowance = 0
used_sectors = set()
destination_entrances = [x.door.entrance.parent_region.name for x in portals if x.destination]
dead_ends = [x.door.entrance.parent_region.name for x in portals if x.deadEnd]
for entrance in entrances_map[name]:
sector = find_sector(entrance, builder.sectors)
if sector:
outflow_target = 0 if entrance not in drop_entrances_allowance else 1
if sector not in used_sectors and (sector.adj_outflow() > outflow_target or entrance in dead_ends):
if entrance not in destination_entrances:
starting_allowance += 1
else:
builder.branches -= 1
used_sectors.add(sector)
elif sector not in used_sectors:
if entrance in destination_entrances and sector.branches() > 0:
builder.branches -= 1
if entrance not in drop_entrances_allowance:
needed_connections.append(entrance)
builder.allowance = starting_allowance
for entrance in needed_connections:
sector = find_sector(entrance, builder.sectors)
if sector and sector not in used_sectors: # ignore things on same sector
is_destination = entrance in destination_entrances
connect_able = False
if entrance in connections.keys():
enabling_region = connections[entrance]
check_list = list(potentials[enabling_region])
if enabling_region.name in ['Desert Ledge', 'Desert Palace Entrance (North) Spot']:
alternate = 'Desert Palace Entrance (North) Spot' if enabling_region.name == 'Desert Ledge' else 'Desert Ledge'
if world.get_region(alternate, player) in potentials:
check_list.extend(potentials[world.get_region(alternate, player)])
connecting_entrances = [x for x in check_list if x != entrance and x not in dead_entrances and x not in drop_entrances_allowance]
connect_able = len(connecting_entrances) > 0
            if is_destination and sector.branches() == 0:
builder.dead_ends += 1
if is_destination and sector.branches() > 0:
builder.branches -= 1
if connect_able and not is_destination:
builder.allowance += 1
used_sectors.add(sector)
def define_sector_features(sectors):
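    """Scan each sector's locations and doors to set its feature flags: chest
    and key-drop counts, whether a big key is required or provided, and the
    presence of crystal switches and orange/blue barriers."""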
for sector in sectors:
for region in sector.regions:
for loc in region.locations:
if '- Prize' in loc.name or loc.name in ['Agahnim 1', 'Agahnim 2']:
pass
elif loc.forced_item and 'Small Key' in loc.item.name:
sector.key_only_locations += 1
elif loc.forced_item and loc.forced_item.bigkey:
sector.bk_provided = True
elif loc.name not in dungeon_events and not loc.forced_item:
sector.chest_locations += 1
if '- Big Chest' in loc.name or loc.name in ["Hyrule Castle - Zelda's Chest",
"Thieves' Town - Blind's Cell"]:
sector.bk_required = True
for ext in region.exits:
door = ext.door
if door is not None:
if door.crystal == CrystalBarrier.Either:
sector.c_switch = True
elif door.crystal == CrystalBarrier.Orange:
sector.orange_barrier = True
elif door.crystal == CrystalBarrier.Blue:
sector.blue_barrier = True
if door.bigKey:
sector.bk_required = True
def assign_sector(sector, dungeon, candidate_sectors, global_pole):
if sector:
del candidate_sectors[sector]
global_pole.consume(sector)
assign_sector_helper(sector, dungeon)
def assign_sector_helper(sector, builder):
builder.sectors.append(sector)
builder.location_cnt += sector.chest_locations
builder.key_drop_cnt += sector.key_only_locations
if sector.c_switch:
builder.c_switch_present = True
if sector.blue_barrier:
builder.c_switch_required = True
if sector.bk_required:
builder.bk_required = True
if sector.bk_provided:
builder.bk_provided = True
count_conn_needed_supplied(sector, builder.conn_needed, builder.conn_supplied)
builder.dead_ends += sector.dead_ends()
builder.branches += sector.branches()
if sector in builder.exception_list:
builder.exception_list.remove(sector)
else:
if builder.split_dungeon_map:
builder.split_dungeon_map = None
if builder.valid_proposal:
builder.valid_proposal = None
def count_conn_needed_supplied(sector, conn_needed, conn_supplied):
for door in sector.outstanding_doors:
# todo: destination sectors like skull 2 west should be
if (door.blocked or door.dead or sector.adj_outflow() <= 1) and not sector.is_entrance_sector():
conn_needed[hook_from_door(door)] += 1
# todo: stonewall
else: # todo: dungeons that need connections... skull, tr, hc, desert (when edges are done)
conn_supplied[hanger_from_door(door)] += 1
def find_sector(r_name, sectors):
for s in sectors:
if r_name in s.region_set():
return s
return None
def assign_location_sectors(dungeon_map, free_location_sectors, global_pole):
valid = False
choices = None
sector_list = list(free_location_sectors)
random.shuffle(sector_list)
while not valid:
choices, d_idx, totals = weighted_random_locations(dungeon_map, sector_list)
for i, sector in enumerate(sector_list):
choice = d_idx[choices[i].name]
totals[choice] += sector.chest_locations
valid = True
for d_name, idx in d_idx.items():
if totals[idx] < 5: # min locations for dungeons is 5 (bk exception)
valid = False
break
for i, choice in enumerate(choices):
builder = dungeon_map[choice.name]
assign_sector(sector_list[i], builder, free_location_sectors, global_pole)
def weighted_random_locations(dungeon_map, free_location_sectors):
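    """Choose a dungeon for each free-location sector, weighted so builders
    below the average location count (total spread over the 13 dungeons) are
    more likely to be picked."""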
population = []
ttl_assigned = 0
weights = []
totals = []
d_idx = {}
for i, dungeon_builder in enumerate(dungeon_map.values()):
population.append(dungeon_builder)
totals.append(dungeon_builder.location_cnt)
ttl_assigned += dungeon_builder.location_cnt
weights.append(6.375)
d_idx[dungeon_builder.name] = i
average = ttl_assigned / 13
for i, db in enumerate(population):
if db.location_cnt < average:
weights[i] += average - db.location_cnt
if db.location_cnt > average:
weights[i] = max(0, weights[i] - db.location_cnt + average)
choices = random.choices(population, weights, k=len(free_location_sectors))
return choices, d_idx, totals
def assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers, global_pole, assign_one=False):
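    """Give each builder that requires a crystal switch (has a blue barrier but
    no switch) one switch sector; with assign_one, guarantee at least one
    switch is placed somewhere valid. Returns the leftover switch sectors."""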
population = []
some_c_switches_present = False
for name, builder in dungeon_map.items():
if builder.c_switch_required and not builder.c_switch_present and not builder.c_locked:
population.append(name)
if builder.c_switch_present and not builder.c_locked:
some_c_switches_present = True
if len(population) == 0: # nothing needs a switch
if assign_one and not some_c_switches_present: # something should have one
if len(crystal_switches) == 0:
raise GenerationException('No crystal switches to assign. Ref %s' % next(iter(dungeon_map.keys())))
valid, builder_choice, switch_choice = False, None, None
switch_candidates = list(crystal_switches)
switch_choice = random.choice(switch_candidates)
switch_candidates.remove(switch_choice)
builder_candidates = [name for name, builder in dungeon_map.items() if not builder.c_locked]
while not valid:
if len(builder_candidates) == 0:
if len(switch_candidates) == 0:
                        raise GenerationException('Nowhere to assign crystal switch. Ref %s' % next(iter(dungeon_map.keys())))
switch_choice = random.choice(switch_candidates)
switch_candidates.remove(switch_choice)
builder_candidates = list(dungeon_map.keys())
choice = random.choice(builder_candidates)
builder_candidates.remove(choice)
builder_choice = dungeon_map[choice]
test_set = [switch_choice]
test_set.extend(crystal_barriers)
valid = global_pole.is_valid_choice(dungeon_map, builder_choice, test_set)
assign_sector(switch_choice, builder_choice, crystal_switches, global_pole)
return crystal_switches
if len(crystal_switches) == 0:
raise GenerationException('No crystal switches to assign')
sector_list = list(crystal_switches)
if len(population) > len(sector_list):
raise GenerationException('Not enough crystal switch sectors for those needed')
choices = random.sample(sector_list, k=len(population))
for i, choice in enumerate(choices):
builder = dungeon_map[population[i]]
assign_sector(choice, builder, crystal_switches, global_pole)
return crystal_switches
def ensure_crystal_switches_reachable(dungeon_map, crystal_switches, polarized_sectors, crystal_barriers, global_pole):
invalid_builders = []
for name, builder in dungeon_map.items():
if builder.c_switch_present and builder.c_switch_required and not builder.c_locked:
invalid_builders.append(builder)
while len(invalid_builders) > 0:
valid_builders = []
for builder in invalid_builders:
entrance_sectors = []
reachable_crystals = defaultdict()
for sector in builder.sectors:
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
if sector.is_entrance_sector() and not sector.destination_entrance:
need_switch = True
for region in sector.get_start_regions():
if region.crystal_switch:
need_switch = False
break
any_benefit = False
for eq in sector.equations:
if len(eq.benefit) > 0:
any_benefit = True
break
if need_switch and any_benefit:
entrance_sectors.append(sector)
for eq in sector.equations:
if eq.c_switch:
reachable_crystals[hook_from_door(eq.door)] = True
valid_ent_sectors = []
for entrance_sector in entrance_sectors:
other_sectors = [x for x in builder.sectors if x != entrance_sector]
reachable, access = is_c_switch_reachable(entrance_sector, reachable_crystals, other_sectors)
if reachable:
valid_ent_sectors.append(entrance_sector)
else:
candidates = {}
for c in find_pol_cand_for_c_switch(access, reachable_crystals, polarized_sectors):
candidates[c] = 'Polarized'
for c in find_crystal_cand(access, crystal_switches):
candidates[c] = 'Crystals'
for c in find_pol_cand_for_c_switch(access, reachable_crystals, crystal_barriers):
candidates[c] = 'Barriers'
valid, sector, which_list = False, None, None
while not valid:
if len(candidates) <= 0:
raise GenerationException(f'need to provide more sophisticated crystal connection for {entrance_sector}')
sector, which_list = random.choice(list(candidates.items()))
del candidates[sector]
valid = global_pole.is_valid_choice(dungeon_map, builder, [sector])
if which_list == 'Polarized':
assign_sector(sector, builder, polarized_sectors, global_pole)
elif which_list == 'Crystals':
assign_sector(sector, builder, crystal_switches, global_pole)
elif which_list == 'Barriers':
assign_sector(sector, builder, crystal_barriers, global_pole)
entrance_sectors = [x for x in entrance_sectors if x not in valid_ent_sectors]
if len(entrance_sectors) == 0:
valid_builders.append(builder)
invalid_builders = [x for x in invalid_builders if x not in valid_builders]
def is_c_switch_reachable(entrance_sector, reachable_crystals, other_sectors):
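    """Fixed-point reachability check: seed hook access from the entrance
    sector's zero-cost equations, propagate it through the other sectors, and
    succeed once a reachable hook's opposite leads to a crystal switch;
    otherwise return the partial access map for candidate searching."""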
current_access = {}
for eq in entrance_sector.equations:
if eq.total_cost() <= 0:
for key, door_list in eq.benefit.items():
for door in door_list:
if door not in eq.crystal_blocked.keys() or eq.crystal_blocked[door] != CrystalBarrier.Blue:
current_access[key] = True
break
for key, flag in current_access.items():
if opposite_h_type(key) in reachable_crystals.keys():
return True, {}
changed = True
while changed:
changed = False
for sector in other_sectors:
for eq in sector.equations:
key, cost_door = eq.cost
if key in current_access.keys() and current_access[key]:
for bene_key, door_list in eq.benefit.items():
for door in door_list:
block_dict = eq.crystal_blocked
if door not in block_dict.keys() or block_dict[door] != CrystalBarrier.Blue:
if bene_key not in current_access.keys():
current_access[bene_key] = True
changed = True
break
for key, flag in current_access.items():
if opposite_h_type(key) in reachable_crystals.keys():
return True, {}
return False, current_access
def find_pol_cand_for_c_switch(access, reachable_crystals, polarized_candidates):
candidates = []
for sector in polarized_candidates:
if pol_cand_matches_access_reach(sector, access, reachable_crystals):
candidates.append(sector)
return candidates
def pol_cand_matches_access_reach(sector, access, reachable_crystals):
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
for eq in sector.equations:
key, cost_door = eq.cost
if key in access.keys() and access[key]:
for bene_key, door_list in eq.benefit.items():
for door in door_list:
if door not in eq.crystal_blocked.keys() or eq.crystal_blocked[door] != CrystalBarrier.Blue:
if opposite_h_type(bene_key) in reachable_crystals.keys():
return True
return False
def find_crystal_cand(access, crystal_switches):
candidates = []
for sector in crystal_switches:
if crystal_cand_matches_access(sector, access):
candidates.append(sector)
return candidates
def crystal_cand_matches_access(sector, access):
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
for eq in sector.equations:
key, cost_door = eq.cost
if key in access.keys() and access[key] and eq.c_switch and len(sector.outstanding_doors) > 1:
return True
return False
def assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole):
population = []
for name, builder in dungeon_map.items():
if builder.c_switch_present and not builder.c_locked:
population.append(name)
sector_list = list(crystal_barriers)
random.shuffle(sector_list)
choices = random.choices(population, k=len(sector_list))
for i, choice in enumerate(choices):
builder = dungeon_map[choice]
assign_sector(sector_list[i], builder, crystal_barriers, global_pole)
def identify_polarity_issues(dungeon_map):
unconnected_builders = {}
for name, builder in dungeon_map.items():
identify_polarity_issues_internal(name, builder, unconnected_builders)
return unconnected_builders
def identify_polarity_issues_internal(name, builder, unconnected_builders):
if len(builder.sectors) == 1:
return
else:
def sector_filter(x, y):
return x != y
# else:
# def sector_filter(x, y):
# return x != y and (x.outflow() > 1 or is_entrance_sector(builder, x))
connection_flags = {}
for slot in PolSlot:
connection_flags[slot] = {}
for slot2 in PolSlot:
connection_flags[slot][slot2] = False
for sector in builder.sectors:
others = [x for x in builder.sectors if sector_filter(x, sector)]
other_mag = sum_magnitude(others)
sector_mag = sector.magnitude()
check_flags(sector_mag, connection_flags)
unconnected_sector = True
for i in PolSlot:
if sector_mag[i.value] == 0 or other_mag[i.value] > 0 or self_connecting(sector, i, sector_mag):
unconnected_sector = False
break
if unconnected_sector:
for i in PolSlot:
if sector_mag[i.value] > 0 and other_mag[i.value] == 0 and not self_connecting(sector, i, sector_mag):
builder.mag_needed[i] = [x for x in PolSlot if other_mag[x.value] > 0]
if name not in unconnected_builders.keys():
unconnected_builders[name] = builder
ttl_mag = sum_magnitude(builder.sectors)
for slot in PolSlot:
for slot2 in PolSlot:
if ttl_mag[slot.value] > 0 and ttl_mag[slot2.value] > 0 and not connection_flags[slot][slot2]:
builder.mag_needed[slot] = [slot2]
builder.mag_needed[slot2] = [slot]
if name not in unconnected_builders.keys():
unconnected_builders[name] = builder
def self_connecting(sector, slot, magnitude):
return sector.polarity()[slot.value] == 0 and sum(magnitude) > magnitude[slot.value]
def check_flags(sector_mag, connection_flags):
for slot in PolSlot:
for slot2 in PolSlot:
if sector_mag[slot.value] > 0 and sector_mag[slot2.value] > 0:
connection_flags[slot][slot2] = True
if slot != slot2:
for check_slot in PolSlot: # transitivity check
if check_slot not in [slot, slot2] and connection_flags[slot2][check_slot]:
connection_flags[slot][check_slot] = True
connection_flags[check_slot][slot] = True
def identify_simple_branching_issues(dungeon_map):
problem_builders = {}
for name, builder in dungeon_map.items():
        if name == 'Skull Woods 2':  # I dislike this special case; todo: identify destination entrances
builder.conn_supplied[Hook.West] += 1
builder.conn_needed[Hook.East] -= 1
builder.forced_loops = calc_forced_loops(builder.sectors)
if builder.dead_ends + builder.forced_loops * 2 > builder.branches + builder.allowance:
problem_builders[name] = builder
for h_type in Hook:
lack = builder.conn_balance[h_type] = builder.conn_supplied[h_type] - builder.conn_needed[h_type]
if lack < 0:
builder.total_conn_lack += -lack
problem_builders[name] = builder
return problem_builders
def calc_forced_loops(sector_list):
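    """Count the loops a sector list forces onto itself: for each hook type,
    any surplus over the opposite hooks supplied by the other sectors must pair
    off internally, and each such pairing consumes two outstanding doors."""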
forced_loops = 0
for sector in sector_list:
h_mag = sector.hook_magnitude()
other_sectors = [x for x in sector_list if x != sector]
other_mag = sum_hook_magnitude(other_sectors)
loop_parts = 0
for hook in Hook:
opp = opposite_h_type(hook).value
if h_mag[hook.value] > other_mag[opp] and loop_present(hook, opp, h_mag, other_mag):
loop_parts += (h_mag[hook.value] - other_mag[opp]) / 2
forced_loops += math.floor(loop_parts)
return forced_loops
def loop_present(hook, opp, h_mag, other_mag):
if hook == Hook.Stairs:
return h_mag[hook.value] - other_mag[opp] >= 2
else:
return h_mag[opp] >= h_mag[hook.value] - other_mag[opp]
def is_satisfied(door_dict_list):
for door_dict in door_dict_list:
for door_list in door_dict.values():
if len(door_list) > 0:
return False
return True
# todo: maybe filter by used doors too
# todo: I want the number of doors that the match is still accessible by
def filter_match_deps(candidate, match_deps):
return [x for x in match_deps if x != candidate]
def sum_magnitude(sector_list):
result = [0] * len(PolSlot)
for sector in sector_list:
vector = sector.magnitude()
for i in range(len(result)):
result[i] = result[i] + vector[i]
return result
def sum_hook_magnitude(sector_list):
result = [0] * len(Hook)
for sector in sector_list:
vector = sector.hook_magnitude()
for i in range(len(result)):
result[i] = result[i] + vector[i]
return result
def sum_polarity(sector_list):
pol = Polarity()
for sector in sector_list:
pol += sector.polarity()
return pol
def assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info):
# step 1: fix polarity connection issues
unconnected_builders = identify_polarity_issues(dungeon_map)
while len(unconnected_builders) > 0:
for name, builder in unconnected_builders.items():
candidates = find_connection_candidates(builder.mag_needed, polarized_sectors)
valid, sector = False, None
while not valid:
if len(candidates) == 0:
raise GenerationException('Cross Dungeon Builder: Cannot find a candidate for connectedness. %s' % name)
sector = random.choice(candidates)
candidates.remove(sector)
valid = global_pole.is_valid_choice(dungeon_map, builder, [sector])
assign_sector(sector, builder, polarized_sectors, global_pole)
builder.mag_needed = {}
unconnected_builders = identify_polarity_issues(unconnected_builders)
# step 2: fix dead ends
problem_builders = identify_simple_branching_issues(dungeon_map)
while len(problem_builders) > 0:
for name, builder in problem_builders.items():
candidates, charges = find_simple_branching_candidates(builder, polarized_sectors)
best = min(charges)
best_candidates = [x for i, x in enumerate(candidates) if charges[i] <= best]
valid, choice = False, None
while not valid:
if len(best_candidates) == 0:
if len(candidates) == 0:
raise GenerationException('Cross Dungeon Builder: Simple branch problems: %s' % name)
best = min(charges)
best_candidates = [x for i, x in enumerate(candidates) if charges[i] <= best]
choice = random.choice(best_candidates)
best_candidates.remove(choice)
i = candidates.index(choice)
candidates.pop(i)
charges.pop(i)
valid = global_pole.is_valid_choice(dungeon_map, builder, [choice]) and valid_connected_assignment(builder, [choice])
assign_sector(choice, builder, polarized_sectors, global_pole)
builder.total_conn_lack = 0
builder.conn_balance.clear()
problem_builders = identify_simple_branching_issues(problem_builders)
# step 3: fix neutrality issues
polarity_step_3(dungeon_map, polarized_sectors, global_pole)
# step 4: fix dead ends again
neutral_choices: List[List] = neutralize_the_rest(polarized_sectors)
problem_builders = identify_branching_issues(dungeon_map, builder_info)
while len(problem_builders) > 0:
for name, builder in problem_builders.items():
candidates = find_branching_candidates(builder, neutral_choices, builder_info)
valid, choice = False, None
while not valid:
if len(candidates) <= 0:
raise GenerationException('Cross Dungeon Builder: Complex branch problems: %s' % name)
choice = random.choice(candidates)
candidates.remove(choice)
valid = global_pole.is_valid_choice(dungeon_map, builder, choice) and valid_polarized_assignment(builder, choice)
neutral_choices.remove(choice)
for sector in choice:
assign_sector(sector, builder, polarized_sectors, global_pole)
builder.unfulfilled.clear()
problem_builders = identify_branching_issues(problem_builders, builder_info)
# step 5: assign randomly until gone - must maintain connectedness, neutral polarity, branching, lack, etc.
comb_w_replace = len(dungeon_map) ** len(neutral_choices)
combinations = None
if comb_w_replace <= 1000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(neutral_choices)))
random.shuffle(combinations)
tries = 0
while len(polarized_sectors) > 0:
if tries > 1000 or (combinations and tries >= len(combinations)):
raise GenerationException('No valid assignment found. Ref: %s' % next(iter(dungeon_map.keys())))
if combinations:
choices = combinations[tries]
else:
choices = random.choices(list(dungeon_map.keys()), k=len(neutral_choices))
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].extend(neutral_choices[i])
all_valid = True
for name, sector_list in chosen_sectors.items():
if not valid_assignment(dungeon_map[name], sector_list, builder_info):
all_valid = False
break
if all_valid:
for i, choice in enumerate(choices):
builder = dungeon_map[choice]
for sector in neutral_choices[i]:
assign_sector(sector, builder, polarized_sectors, global_pole)
tries += 1
def polarity_step_3(dungeon_map, polarized_sectors, global_pole):
# step 3a: fix odd builders
odd_builders = [x for x in dungeon_map.values() if sum_polarity(x.sectors).charge() % 2 != 0]
grouped_choices: List[List] = find_forced_groupings(polarized_sectors, dungeon_map)
random.shuffle(odd_builders)
odd_candidates = find_odd_sectors(grouped_choices)
tries = 0
while len(odd_builders) > 0:
if tries > 1000:
raise GenerationException('Unable to fix dungeon parity. Ref: %s' % next(iter(odd_builders)).name)
best_choices = None
best_charge = sum([x.polarity().charge() for x in dungeon_map.values()])
samples = 0
combos = ncr(len(odd_candidates), len(odd_builders))
sample_target = 100 if combos > 10 else combos * 2
while best_choices is None or samples < sample_target:
samples += 1
if len(odd_candidates) < len(odd_builders):
raise GenerationException(f'Unable to fix dungeon parity - not enough candidates.'
f' Ref: {next(iter(odd_builders)).name}')
choices = random.sample(odd_candidates, k=len(odd_builders))
valid = global_pole.is_valid_multi_choice(dungeon_map, odd_builders, choices)
charge = calc_total_charge(dungeon_map, odd_builders, choices)
if valid and charge < best_charge:
best_choices = choices
best_charge = charge
if samples > sample_target and best_choices is None:
best_choices = choices
best_charge = charge
samples = 0
all_valid = True
for i, candidate_list in enumerate(best_choices):
test_set = find_forced_connections(dungeon_map, candidate_list, polarized_sectors)
builder = odd_builders[i]
if ensure_test_set_connectedness(test_set, builder, polarized_sectors, dungeon_map, global_pole):
all_valid &= valid_branch_only(builder, candidate_list)
else:
all_valid = False
break
if not all_valid:
break
if all_valid:
for i, candidate_list in enumerate(best_choices):
builder = odd_builders[i]
for sector in candidate_list:
assign_sector(sector, builder, polarized_sectors, global_pole)
odd_builders = [x for x in dungeon_map.values() if sum_polarity(x.sectors).charge() % 2 != 0]
else:
tries += 1
# step 3b: neutralize all builders
parallel_full_neutralization(dungeon_map, polarized_sectors, global_pole)
def parallel_full_neutralization(dungeon_map, polarized_sectors, global_pole):
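    """Neutralize every builder's polarity at once: grow a shared
    dynamic-programming table of sector combinations by depth, then for each
    unbalanced builder pick a globally valid combination that exactly cancels
    its remaining polarity."""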
start = time.process_time()
builders = list(dungeon_map.values())
finished = all([x.polarity().is_neutral() for x in builders])
solution_list, current_depth = defaultdict(list), 1
complete_builders = [x for x in builders if x.polarity().is_neutral()]
avail_sectors = list(polarized_sectors)
while not finished:
builders_to_check = [x for x in builders if not (x.polarity()+sum_polarity(solution_list[x])).is_neutral()]
candidates, last_depth = find_exact_neutralizing_candidates_parallel_db(builders_to_check, solution_list,
avail_sectors, current_depth)
increment_depth = True
any_valid = False
for builder, candidate_list in candidates.items():
valid, sectors = False, None
while not valid:
if len(candidate_list) == 0:
                    increment_depth = False  # need to look again at current level
break
sectors = random.choice(candidate_list)
candidate_list.remove(sectors)
proposal = solution_list.copy()
proposal[builder] = list(proposal[builder])
proposal[builder].extend(sectors)
valid = global_pole.is_valid_multi_choice_2(dungeon_map, builders, proposal)
if valid:
any_valid = True
solution_list[builder].extend(sectors)
for sector in sectors:
avail_sectors.remove(sector)
complete_builders.append(builder)
for other_builder, other_cand_list in candidates.items():
if other_builder not in complete_builders:
candidates_to_remove = list()
for candidate in other_cand_list:
for sector in sectors:
if sector in candidate:
candidates_to_remove.append(candidate)
break
other_cand_list[:] = [x for x in other_cand_list if x not in candidates_to_remove]
# remove sectors from other candidate lists
if not any_valid:
increment_depth = True
current_depth = last_depth + 1 if increment_depth else last_depth
finished = all([(x.polarity()+sum_polarity(solution_list[x])).is_neutral() for x in builders])
logging.getLogger('').info(f'-Balanced solution found in {time.process_time()-start}')
for builder, sectors in solution_list.items():
for sector in sectors:
assign_sector(sector, builder, polarized_sectors, global_pole)
def find_forced_connections(dungeon_map, candidate_list, polarized_sectors):
test_set = list(candidate_list)
other_sectors = [x for x in polarized_sectors if x not in candidate_list]
dungeon_hooks = defaultdict(int)
for name, builder in dungeon_map.items():
d_mag = sum_hook_magnitude(builder.sectors)
for val in Hook:
dungeon_hooks[val] += d_mag[val.value]
queue = deque(candidate_list)
while queue:
candidate = queue.popleft()
c_mag = candidate.hook_magnitude()
other_candidates = [x for x in candidate_list if x != candidate]
for val in Hook:
if c_mag[val.value] > 0:
opp = opposite_h_type(val)
o_val = opp.value
if sum_hook_magnitude(other_candidates)[o_val] == 0 and dungeon_hooks[opp] == 0 and not valid_self(c_mag, val, opp):
forced_sector = []
for sec in other_sectors:
if sec.hook_magnitude()[o_val] > 0:
forced_sector.append(sec)
if len(forced_sector) > 1:
break
if len(forced_sector) == 1:
test_set.append(forced_sector[0])
return test_set
def valid_self(c_mag, val, opp):
if val == Hook.Stairs:
return c_mag[val.value] > 2
else:
return c_mag[opp.value] > 0 and sum(c_mag) > 2
def ensure_test_set_connectedness(test_set, builder, polarized_sectors, dungeon_map, global_pole):
test_copy = list(test_set)
while not valid_connected_assignment(builder, test_copy):
dummy_builder = DungeonBuilder("Dummy Builder for " + builder.name)
dummy_builder.sectors = builder.sectors + test_copy
possibles = [x for x in polarized_sectors if x not in test_copy]
candidates = find_connected_candidates(possibles)
valid, sector = False, None
while not valid:
if len(candidates) == 0:
return False
sector = random.choice(candidates)
candidates.remove(sector)
t2 = test_copy+[sector]
valid = global_pole.is_valid_choice(dungeon_map, builder, t2) and valid_branch_only(builder, t2)
test_copy.append(sector)
dummy_builder.sectors = builder.sectors + test_copy
test_set[:] = test_copy
return True
def calc_total_charge(dungeon_map, builders, sector_lists):
polarity_list = [x.polarity() for x in dungeon_map.values() if x not in builders]
for i, sectors in enumerate(sector_lists):
builder = builders[i]
polarity = builder.polarity() + sum_polarity(sectors)
polarity_list.append(polarity)
return sum([x.charge() for x in polarity_list])
class GlobalPolarity:
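    """Tracks the positive/negative polarity totals and even/odd parity counts
    remaining in the unassigned sector pool, so proposals can be vetted for
    global feasibility before sectors are consumed."""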
def __init__(self, candidate_sectors):
self.positives = [0, 0, 0]
self.negatives = [0, 0, 0]
self.evens = 0
self.odds = 0
for sector in candidate_sectors:
pol = sector.polarity()
if pol.charge() % 2 == 0:
self.evens += 1
else:
self.odds += 1
for slot in PolSlot:
if pol.vector[slot.value] < 0:
self.negatives[slot.value] += -pol.vector[slot.value]
elif pol.vector[slot.value] > 0:
self.positives[slot.value] += pol.vector[slot.value]
def copy(self):
gp = GlobalPolarity([])
gp.positives = self.positives.copy()
gp.negatives = self.negatives.copy()
gp.evens = self.evens
gp.odds = self.odds
return gp
def is_valid(self, dungeon_map):
polarities = [x.polarity() for x in dungeon_map.values()]
return self._check_parity(polarities) and self._is_valid_polarities(polarities)
def _check_parity(self, polarities):
local_evens = 0
local_odds = 0
for pol in polarities:
if pol.charge() % 2 == 0:
local_evens += 1
else:
local_odds += 1
if local_odds > self.odds:
return False
return True
def _is_valid_polarities(self, polarities):
positives = self.positives.copy()
negatives = self.negatives.copy()
for polarity in polarities:
for slot in PolSlot:
if polarity[slot.value] > 0 and slot != PolSlot.Stairs:
if negatives[slot.value] >= polarity[slot.value]:
negatives[slot.value] -= polarity[slot.value]
else:
return False
elif polarity[slot.value] < 0 and slot != PolSlot.Stairs:
if positives[slot.value] >= -polarity[slot.value]:
positives[slot.value] += polarity[slot.value]
else:
return False
elif slot == PolSlot.Stairs:
if positives[slot.value] >= polarity[slot.value]:
positives[slot.value] -= polarity[slot.value]
else:
return False
return True
def consume(self, sector):
polarity = sector.polarity()
if polarity.charge() % 2 == 0:
self.evens -= 1
else:
self.odds -= 1
for slot in PolSlot:
if polarity[slot.value] > 0 and slot != PolSlot.Stairs:
if self.positives[slot.value] >= polarity[slot.value]:
self.positives[slot.value] -= polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
elif polarity[slot.value] < 0 and slot != PolSlot.Stairs:
if self.negatives[slot.value] >= -polarity[slot.value]:
self.negatives[slot.value] += polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
elif slot == PolSlot.Stairs:
if self.positives[slot.value] >= polarity[slot.value]:
self.positives[slot.value] -= polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
def is_valid_choice(self, dungeon_map, builder, sectors):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral() and x != builder]
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
def is_valid_multi_choice(self, dungeon_map, builders, sector_lists):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral()
and x not in builders]
for i, sectors in enumerate(sector_lists):
builder = builders[i]
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
def is_valid_multi_choice_2(self, dungeon_map, builders, sector_dict):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral()
and x not in builders]
for builder, sectors in sector_dict.items():
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
# def check_odd_polarities(self, candidate_sectors, dungeon_map):
# odd_candidates = [x for x in candidate_sectors if x.polarity().charge() % 2 != 0]
# odd_map = {n: x for (n, x) in dungeon_map.items() if sum_polarity(x.sectors).charge() % 2 != 0}
# gp = GlobalPolarity(odd_candidates)
# return gp.is_valid(odd_map)
def find_connection_candidates(mag_needed, sector_pool):
candidates = []
for sector in sector_pool:
if sector.branching_factor() < 2:
continue
mag = sector.magnitude()
matches = False
for slot, match_slot in mag_needed.items():
if mag[slot.value] > 0:
for i in PolSlot:
if i in match_slot and mag[i.value] > 0:
matches = True
break
if matches:
candidates.append(sector)
return candidates
def find_simple_branching_candidates(builder, sector_pool):
candidates = defaultdict(list)
charges = defaultdict(list)
outflow_needed = builder.dead_ends + builder.forced_loops * 2 > builder.branches + builder.allowance
total_needed = builder.dead_ends + builder.forced_loops * 2 - builder.branches + builder.allowance
original_lack = builder.total_conn_lack
best_lack = original_lack
for sector in sector_pool:
if outflow_needed and sector.branching_factor() <= 2:
continue
calc_sector_balance(sector)
ttl_lack = 0
for hook in Hook:
lack = builder.conn_balance[hook] + sector.conn_balance[hook]
if lack < 0:
ttl_lack += -lack
forced_loops = calc_forced_loops(builder.sectors + [sector])
net_outflow = builder.dead_ends + forced_loops * 2 + sector.dead_ends() - builder.branches - builder.allowance - sector.branches()
valid_branches = net_outflow < total_needed
if valid_branches and (ttl_lack < original_lack or original_lack >= 0):
candidates[ttl_lack].append(sector)
charges[ttl_lack].append((builder.polarity()+sector.polarity()).charge())
if ttl_lack < best_lack:
best_lack = ttl_lack
if best_lack == original_lack and not outflow_needed:
raise GenerationException('These candidates may not help at all')
if len(candidates[best_lack]) <= 0:
raise GenerationException('Nothing can fix the simple branching issue. Panic ensues.')
return candidates[best_lack], charges[best_lack]
def calc_sector_balance(sector): # todo: move to base class?
if sector.conn_balance is None:
sector.conn_balance = defaultdict(int)
for door in sector.outstanding_doors:
if door.blocked or door.dead or sector.branching_factor() <= 1:
sector.conn_balance[hook_from_door(door)] -= 1
else:
sector.conn_balance[hanger_from_door(door)] += 1
def find_odd_sectors(grouped_candidates):
return [x for x in grouped_candidates if sum_polarity(x).charge() % 2 != 0]
# This is related to the perfect sum problem in CS
# * Best algorithm so far - db for dynamic programming
# * Keeps track of unique deviations from neutral in the index
# * Another assumption is that solutions that take fewer sector are more ideal
# * When attempting to add depth and there are no more possibilities, this raises an Exception
# * Each depth should be checked before asking for another one
# An alternative approach would be to trim the db after deciding the candidate at the current depth will be
# part of the proposal
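# Hedged sketch of the db layout, assuming two available sectors s1 and s2 with
# distinct polarities (set notation below is informal shorthand):
#   db[0] == {Polarity(): {OrderedFrozenSet(): None}}
#   db[1] == {s1.polarity(): {{s1}: None}, s2.polarity(): {{s2}: None}}
#   db[2] == {s1.polarity() + s2.polarity(): {{s1, s2}: None}}
# A builder is neutralized at depth d when the complement() of its current
# polarity appears as a key of db[d]; the OrderedFrozenSet keys under that
# polarity are the candidate sector sets.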
def find_exact_neutralizing_candidates_parallel_db(builders, proposal, avail_sectors, current_depth):
candidate_map = defaultdict(list)
polarity_map = {}
for builder in builders:
polarity_map[builder] = builder.polarity() + sum_polarity(proposal[builder])
finished = False
db, index = create_db_for_depth(current_depth, avail_sectors)
while not finished:
depth_map = db[current_depth]
for builder in builders:
target = polarity_map[builder].complement()
if target in depth_map.keys():
finished = True
candidate_map[builder].extend(depth_map[target].keys())
if finished:
for builder in list(candidate_map.keys()):
try:
candidate_map[builder] = weed_candidates(builder, {0: candidate_map[builder]}, 0)
except NeutralizingException:
del candidate_map[builder]
if len(candidate_map) == 0:
finished = False
if not finished:
current_depth += 1
add_depth_to_db(db, index, current_depth, avail_sectors)
return candidate_map, current_depth
def create_db_for_depth(depth, avail_sectors):
db = {0: {Polarity(): {OrderedFrozenSet(): None}}}
db_index = {Polarity()}
for i in range(1, depth+1):
add_depth_to_db(db, db_index, i, avail_sectors)
return db, db_index
def add_depth_to_db(db, db_index, i, avail_sectors):
previous = db[i-1]
depth_map = defaultdict(dict)
index_additions = set()
for sector in avail_sectors:
sector_set = {sector}
sector_pol = sector.polarity()
for polarity, choices in previous.items():
combo_pol = sector_pol + polarity
if combo_pol not in db_index:
index_additions.add(combo_pol)
for choice in choices:
if sector in choice.frozen_set:
continue
new_set = choice.new_with_element(sector_set)
depth_map[combo_pol][new_set] = None
for addition in index_additions:
if len(depth_map[addition]) > 0:
db_index.add(addition)
else:
del depth_map[addition]
if len(depth_map) == 0:
        raise NeutralizingException('No solution exists for this particular combination. Crystal switch issue?') # restart required
db[i] = depth_map
class OrderedFrozenSet:
def __init__(self):
self.frozen_set = frozenset()
self.order = []
def __eq__(self, other):
return self.frozen_set == other.frozen_set
def __hash__(self):
return hash(self.frozen_set)
def __iter__(self):
return self.order.__iter__()
def __len__(self):
return len(self.order)
def new_with_element(self, elements):
ret = OrderedFrozenSet()
ret.frozen_set = frozenset(self.frozen_set | elements)
ret.order = list(self.order)
ret.order.extend(elements)
return ret
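    # Hedged usage sketch (sector_a/sector_b are hypothetical): sets grow
    # immutably, preserving insertion order for iteration while equality and
    # hashing defer to the underlying frozenset.
    #   base = OrderedFrozenSet()
    #   with_a = base.new_with_element({sector_a})
    #   with_ab = with_a.new_with_element({sector_b})
    #   assert len(with_ab) == 2 and list(with_ab) == [sector_a, sector_b]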
# this could be re-worked for the more complete solution
# i'm not sure it does a whole lot now
def weed_candidates(builder, candidates, best_charge):
official_cand = []
while len(official_cand) == 0:
if len(candidates.keys()) == 0:
raise NeutralizingException('Cross Dungeon Builder: Weeded out all candidates %s' % builder.name)
while best_charge not in candidates.keys():
best_charge += 1
candidate_list = candidates.pop(best_charge)
best_lack = None
for cand in candidate_list:
ttl_deads = 0
ttl_branches = 0
for sector in cand:
calc_sector_balance(sector)
ttl_deads += sector.dead_ends()
ttl_branches += sector.branches()
ttl_lack = 0
ttl_balance = 0
for hook in Hook:
bal = 0
for sector in cand:
bal += sector.conn_balance[hook]
lack = builder.conn_balance[hook] + bal
ttl_balance += lack
if lack < 0:
ttl_lack += -lack
forced_loops = calc_forced_loops(builder.sectors + list(cand))
if ttl_balance >= 0 and builder.dead_ends + ttl_deads + forced_loops * 2 <= builder.branches + ttl_branches + builder.allowance:
if best_lack is None or ttl_lack < best_lack:
best_lack = ttl_lack
official_cand = [cand]
elif ttl_lack == best_lack:
official_cand.append(cand)
    # choose from among those that use the fewest sectors
best_len = None
cand_len = []
for cand in official_cand:
size = len(cand)
if best_len is None or size < best_len:
best_len = size
cand_len = [cand]
elif size == best_len:
cand_len.append(cand)
return cand_len
def find_branching_candidates(builder, neutral_choices, builder_info):
candidates = []
for choice in neutral_choices:
resolved, problem_list = check_for_valid_layout(builder, choice, builder_info)
if resolved:
candidates.append(choice)
return candidates
def find_connected_candidates(sector_pool):
candidates = []
for sector in sector_pool:
if sector.adj_outflow() >= 2:
candidates.append(sector)
return candidates
def neutralize_the_rest(sector_pool):
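    # Hedged sketch: pick a random anchor sector, then scan the r-sized
    # combinations of the remaining pool (via kth_combination) for a group
    # whose summed polarity is neutral; failed anchors are parked in
    # failed_pool and r grows once the main pool thins out.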
neutral_choices = []
main_pool = list(sector_pool)
failed_pool = []
r_size = 1
while len(main_pool) > 0 or len(failed_pool) > 0:
if len(main_pool) <= r_size:
main_pool.extend(failed_pool)
failed_pool.clear()
r_size += 1
candidate = random.choice(main_pool)
main_pool.remove(candidate)
if r_size > len(main_pool):
raise GenerationException("Cross Dungeon Builder: no more neutral pairings possible")
combinations = ncr(len(main_pool), r_size)
itr = 0
done = False
while not done:
ttl_polarity = candidate.polarity()
choice_set = kth_combination(itr, main_pool, r_size)
for choice in choice_set:
ttl_polarity += choice.polarity()
if ttl_polarity.is_neutral():
choice_set.append(candidate)
neutral_choices.append(choice_set)
main_pool = [x for x in main_pool if x not in choice_set]
failed_pool = [x for x in failed_pool if x not in choice_set]
done = True
else:
itr += 1
if itr >= combinations:
failed_pool.append(candidate)
done = True
return neutral_choices
# doesn't force a grouping when everything in the found_list comes from the same sector
def find_forced_groupings(sector_pool, dungeon_map):
dungeon_hooks = {}
for name, builder in dungeon_map.items():
dungeon_hooks[name] = categorize_groupings(builder.sectors)
groupings = []
queue = deque(sector_pool)
skips = set()
while len(queue) > 0:
grouping = queue.popleft()
is_list = isinstance(grouping, List)
if not is_list and grouping in skips:
continue
grouping = grouping if is_list else [grouping]
hook_categories = categorize_groupings(grouping)
force_found = False
for val in Hook:
if val in hook_categories.keys():
required_doors, flexible_doors = hook_categories[val]
if len(required_doors) >= 1:
opp = opposite_h_type(val)
found_list = []
if opp in hook_categories.keys() and len(hook_categories[opp][1]) > 0:
found_list.extend(hook_categories[opp][1])
for name, hooks in dungeon_hooks.items():
if opp in hooks.keys() and len(hooks[opp][1]) > 0:
found_list.extend(hooks[opp][1])
other_sectors = [x for x in sector_pool if x not in grouping]
other_sector_cats = categorize_groupings(other_sectors)
if opp in other_sector_cats.keys() and len(other_sector_cats[opp][1]) > 0:
found_list.extend(other_sector_cats[opp][1])
if len(required_doors) == len(found_list):
forced_sectors = []
for sec in other_sectors:
cats = categorize_groupings([sec])
if opp in cats.keys() and len(cats[opp][1]) > 0:
forced_sectors.append(sec)
if len(forced_sectors) > 0:
grouping.extend(forced_sectors)
skips.update(forced_sectors)
merge_groups = []
for group in groupings:
for sector in group:
if sector in forced_sectors:
merge_groups.append(group)
for merge in merge_groups:
grouping = list(set(grouping).union(set(merge)))
groupings.remove(merge)
queue.append(grouping)
force_found = True
elif len(flexible_doors) == 1:
opp = opposite_h_type(val)
found_list = []
if opp in hook_categories.keys() and (len(hook_categories[opp][0]) > 0 or len(hook_categories[opp][1]) > 0):
found_list.extend(hook_categories[opp][0])
found_list.extend([x for x in hook_categories[opp][1] if x not in flexible_doors])
for name, hooks in dungeon_hooks.items():
if opp in hooks.keys() and (len(hooks[opp][0]) > 0 or len(hooks[opp][1]) > 0):
found_list.extend(hooks[opp][0])
found_list.extend(hooks[opp][1])
other_sectors = [x for x in sector_pool if x not in grouping]
other_sector_cats = categorize_groupings(other_sectors)
if opp in other_sector_cats.keys() and (len(other_sector_cats[opp][0]) > 0 or len(other_sector_cats[opp][1]) > 0):
found_list.extend(other_sector_cats[opp][0])
found_list.extend(other_sector_cats[opp][1])
if len(found_list) == 1:
forced_sectors = []
for sec in other_sectors:
cats = categorize_groupings([sec])
if opp in cats.keys() and (len(cats[opp][0]) > 0 or len(cats[opp][1]) > 0):
forced_sectors.append(sec)
if len(forced_sectors) > 0:
grouping.extend(forced_sectors)
skips.update(forced_sectors)
merge_groups = []
for group in groupings:
for sector in group:
if sector in forced_sectors:
merge_groups.append(group)
for merge in merge_groups:
grouping += merge
groupings.remove(merge)
queue.append(grouping)
force_found = True
if force_found:
break
if not force_found:
groupings.append(grouping)
return groupings
def categorize_groupings(sectors):
hook_categories = {}
for sector in sectors:
for door in sector.outstanding_doors:
hook = hook_from_door(door)
if hook not in hook_categories.keys():
hook_categories[hook] = ([], [])
if door.blocked or door.dead:
hook_categories[hook][0].append(door)
else:
hook_categories[hook][1].append(door)
return hook_categories
def valid_assignment(builder, sector_list, builder_info):
if not valid_entrance(builder, sector_list, builder_info):
return False
if not valid_c_switch(builder, sector_list):
return False
if not valid_polarized_assignment(builder, sector_list):
return False
resolved, problems = check_for_valid_layout(builder, sector_list, builder_info)
return resolved
def valid_entrance(builder, sector_list, builder_info):
is_dead_end = False
if len(builder.sectors) == 0:
is_dead_end = True
else:
entrances, splits, c_tuple, world, player = builder_info
if builder.name not in entrances.keys():
name_parts = builder.name.rsplit(' ', 1)
entrance_list = splits[name_parts[0]][name_parts[1]]
entrances = []
for sector in builder.sectors:
if sector.is_entrance_sector():
sector.region_set()
entrances.append(sector)
all_dead = True
for sector in entrances:
for region in entrance_list:
if region in sector.region_set():
portal = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region.name == region), None)
if portal and not portal.deadEnd:
all_dead = False
break
if not all_dead:
break
is_dead_end = all_dead
return len(sector_list) == 0 if is_dead_end else True
def valid_c_switch(builder, sector_list):
if builder.c_switch_present:
return True
for sector in sector_list:
if sector.c_switch:
return True
if builder.c_switch_required:
return False
for sector in sector_list:
if sector.blue_barrier:
return False
return True
def valid_connected_assignment(builder, sector_list):
full_list = sector_list + builder.sectors
if len(full_list) == 1 and sum_magnitude(full_list) == [0, 0, 0]:
return True
for sector in full_list:
if sector.is_entrance_sector():
continue
others = [x for x in full_list if x != sector]
other_mag = sum_magnitude(others)
sector_mag = sector.magnitude()
hookable = False
for i in range(len(sector_mag)):
if sector_mag[i] > 0 and other_mag[i] > 0:
hookable = True
if not hookable:
return False
return True
def valid_branch_assignment(builder, sector_list):
if not valid_connected_assignment(builder, sector_list):
return False
return valid_branch_only(builder, sector_list)
def valid_branch_only(builder, sector_list):
forced_loops = calc_forced_loops(builder.sectors + sector_list)
ttl_deads = 0
ttl_branches = 0
for s in sector_list:
# calc_sector_balance(sector) # do I want to check lack here? see weed_candidates
ttl_deads += s.dead_ends()
ttl_branches += s.branches()
return builder.dead_ends + ttl_deads + forced_loops * 2 <= builder.branches + ttl_branches + builder.allowance
def valid_polarized_assignment(builder, sector_list):
if not valid_branch_assignment(builder, sector_list):
return False
return (sum_polarity(sector_list) + sum_polarity(builder.sectors)).is_neutral()
def assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info):
comb_w_replace = len(dungeon_map) ** len(neutral_sectors)
combinations = None
if comb_w_replace <= 1000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(neutral_sectors)))
random.shuffle(combinations)
tries = 0
while len(neutral_sectors) > 0:
if tries > 1000 or (combinations and tries >= len(combinations)):
raise GenerationException('No valid assignment found for "neutral" sectors. Ref: %s' % next(iter(dungeon_map.keys())))
# sector_list = list(neutral_sectors)
if combinations:
choices = combinations[tries]
else:
choices = random.choices(list(dungeon_map.keys()), k=len(neutral_sectors))
neutral_sector_list = list(neutral_sectors)
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].append(neutral_sector_list[i])
all_valid = True
for name, sector_list in chosen_sectors.items():
if not valid_assignment(dungeon_map[name], sector_list, builder_info):
all_valid = False
break
if all_valid:
for name, sector_list in chosen_sectors.items():
builder = dungeon_map[name]
for sector in sector_list:
assign_sector(sector, builder, neutral_sectors, global_pole)
tries += 1
def split_dungeon_builder(builder, split_list, builder_info):
if builder.split_dungeon_map and len(builder.exception_list) == 0:
for name, proposal in builder.valid_proposal.items():
builder.split_dungeon_map[name].valid_proposal = proposal
return builder.split_dungeon_map # we made this earlier in gen, just use it
attempts, comb_w_replace, merge_attempt, merge_limit = 0, None, 0, len(split_list) - 1
while attempts < 5: # does not solve coin flips 3% of the time
try:
candidate_sectors = dict.fromkeys(builder.sectors)
global_pole = GlobalPolarity(candidate_sectors)
dungeon_map, sub_builder, merge_keys = {}, None, []
if merge_attempt > 0:
candidates = []
for name, split_entrances in split_list.items():
if len(split_entrances) > 1:
candidates.append(name)
continue
elif len(split_entrances) <= 0:
continue
ents, splits, c_tuple, world, player = builder_info
r_name = split_entrances[0]
p = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region.name == r_name), None)
if p and not p.deadEnd:
candidates.append(name)
merge_keys = random.sample(candidates, merge_attempt+1) if len(candidates) >= merge_attempt+1 else []
for name, split_entrances in split_list.items():
key = builder.name + ' ' + name
if merge_keys and name in merge_keys:
other_keys = [builder.name + ' ' + x for x in merge_keys if x != name]
other_key = next((x for x in other_keys if x in dungeon_map), None)
if other_key:
key = other_key
sub_builder = dungeon_map[other_key]
sub_builder.all_entrances.extend(split_entrances)
if key not in dungeon_map:
dungeon_map[key] = sub_builder = DungeonBuilder(key)
sub_builder.split_flag = True
sub_builder.all_entrances = list(split_entrances)
for r_name in split_entrances:
assign_sector(find_sector(r_name, candidate_sectors), sub_builder, candidate_sectors, global_pole)
comb_w_replace = len(dungeon_map) ** len(candidate_sectors)
return balance_split(candidate_sectors, dungeon_map, global_pole, builder_info)
except (GenerationException, NeutralizingException):
if comb_w_replace and comb_w_replace <= 10000:
attempts += 5 # all the combinations were tried already, no use repeating
else:
attempts += 1
if attempts >= 5 and merge_attempt < merge_limit:
merge_attempt, attempts = merge_attempt + 1, 0
raise GenerationException('Unable to resolve in 5 attempts')
def balance_split(candidate_sectors, dungeon_map, global_pole, builder_info):
dungeon_entrances, split_dungeon_entrances, connections_tuple, world, player = builder_info
for name, builder in dungeon_map.items():
calc_allowance_and_dead_ends(builder, connections_tuple, world, player)
comb_w_replace = len(dungeon_map) ** len(candidate_sectors)
if comb_w_replace <= 10000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(candidate_sectors)))
random.shuffle(combinations)
tries = 0
while tries < len(combinations):
choices = combinations[tries]
main_sector_list = list(candidate_sectors)
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].append(main_sector_list[i])
all_valid = True
for name, builder in dungeon_map.items():
if not valid_assignment(builder, chosen_sectors[name], builder_info):
all_valid = False
break
if all_valid:
for name, sector_list in chosen_sectors.items():
builder = dungeon_map[name]
for sector in sector_list:
assign_sector(sector, builder, candidate_sectors, global_pole)
return dungeon_map
tries += 1
raise GenerationException('Split Dungeon Builder: Impossible dungeon. Ref %s' % next(iter(dungeon_map.keys())))
# categorize sectors
check_for_forced_dead_ends(dungeon_map, candidate_sectors, global_pole)
check_for_forced_assignments(dungeon_map, candidate_sectors, global_pole)
check_for_forced_crystal(dungeon_map, candidate_sectors, global_pole)
crystal_switches, crystal_barriers, neutral_sectors, polarized_sectors = categorize_sectors(candidate_sectors)
leftover = assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers,
global_pole, len(crystal_barriers) > 0)
ensure_crystal_switches_reachable(dungeon_map, leftover, polarized_sectors, crystal_barriers, global_pole)
for sector in leftover:
if sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
# blue barriers
assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole)
# polarity:
assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info)
# the rest
assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info)
return dungeon_map
def check_for_forced_dead_ends(dungeon_map, candidate_sectors, global_pole):
dead_end_sectors = [x for x in candidate_sectors if x.branching_factor() <= 1]
other_sectors = [x for x in candidate_sectors if x not in dead_end_sectors]
for name, builder in dungeon_map.items():
other_sectors += builder.sectors
other_magnitude = sum_hook_magnitude(other_sectors)
dead_cnt = [0] * len(Hook)
for sector in dead_end_sectors:
hook_mag = sector.hook_magnitude()
for hook in Hook:
if hook_mag[hook.value] != 0:
dead_cnt[hook.value] += 1
for hook in Hook:
opp = opposite_h_type(hook).value
if dead_cnt[hook.value] > other_magnitude[opp]:
raise GenerationException('Impossible to satisfy all these dead ends')
elif dead_cnt[hook.value] == other_magnitude[opp]:
candidates = [x for x in dead_end_sectors if x.hook_magnitude()[hook.value] > 0]
for sector in other_sectors:
if sector.hook_magnitude()[opp] > 0 and sector.is_entrance_sector() and sector.branching_factor() == 2:
builder = None
for b in dungeon_map.values():
if sector in b.sectors:
builder = b
break
valid, candidate_sector = False, None
while not valid:
if len(candidates) == 0:
raise GenerationException('Split Dungeon Builder: Bad dead end %s' % builder.name)
candidate_sector = random.choice(candidates)
candidates.remove(candidate_sector)
valid = global_pole.is_valid_choice(dungeon_map, builder, [candidate_sector]) and check_crystal(candidate_sector, sector)
assign_sector(candidate_sector, builder, candidate_sectors, global_pole)
builder.c_locked = True
def check_crystal(dead_end, entrance):
if dead_end.blue_barrier and not entrance.c_switch and not dead_end.c_switch:
return False
if entrance.blue_barrier and not entrance.c_switch and not dead_end.c_switch:
return False
return True
def check_for_forced_assignments(dungeon_map, candidate_sectors, global_pole):
done = False
while not done:
done = True
magnitude = sum_hook_magnitude(candidate_sectors)
dungeon_hooks = {}
for name, builder in dungeon_map.items():
dungeon_hooks[name] = sum_hook_magnitude(builder.sectors)
for val in Hook:
if magnitude[val.value] == 1:
forced_sector = None
for sec in candidate_sectors:
if sec.hook_magnitude()[val.value] > 0:
forced_sector = sec
break
opp = opposite_h_type(val).value
other_sectors = [x for x in candidate_sectors if x != forced_sector]
if sum_hook_magnitude(other_sectors)[opp] == 0:
found_hooks = []
for name, hooks in dungeon_hooks.items():
if hooks[opp] > 0 and not dungeon_map[name].c_locked:
found_hooks.append(name)
if len(found_hooks) == 1:
done = False
assign_sector(forced_sector, dungeon_map[found_hooks[0]], candidate_sectors, global_pole)
def check_for_forced_crystal(dungeon_map, candidate_sectors, global_pole):
for name, builder in dungeon_map.items():
if check_for_forced_crystal_single(builder, candidate_sectors):
builder.c_switch_required = True
def check_for_forced_crystal_single(builder, candidate_sectors):
builder_doors = defaultdict(dict)
for sector in builder.sectors:
for door in sector.outstanding_doors:
builder_doors[hook_from_door(door)][door] = sector
if len(builder_doors) == 0:
return False
candidate_doors = defaultdict(dict)
for sector in candidate_sectors:
for door in sector.outstanding_doors:
candidate_doors[hook_from_door(door)][door] = sector
for hook in builder_doors.keys():
for door in builder_doors[hook].keys():
opp = opposite_h_type(hook)
if opp in builder_doors.keys():
for d, sector in builder_doors[opp].items():
if d != door and (not sector.blue_barrier or sector.c_switch):
return False
for d, sector in candidate_doors[opp].items():
if not sector.blue_barrier or sector.c_switch:
return False
return True
def categorize_sectors(candidate_sectors):
crystal_switches = {}
crystal_barriers = {}
polarized_sectors = {}
neutral_sectors = {}
for sector in candidate_sectors:
if sector.c_switch:
crystal_switches[sector] = None
elif sector.blue_barrier:
crystal_barriers[sector] = None
elif sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
return crystal_switches, crystal_barriers, neutral_sectors, polarized_sectors
class NeutralizingException(Exception):
pass
class GenerationException(Exception):
pass
class DoorEquation:
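    # Hedged reading of this model: `cost` is the (hook type, door) pair that
    # must be paid to enter through self.door; `benefit` maps hook types to the
    # doors this equation makes reachable; `crystal_blocked` records doors that
    # additionally need a specific CrystalBarrier state.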
def __init__(self, door):
self.door = door
self.cost = None, None
self.benefit = defaultdict(list)
self.required = False
self.access_id = None
self.c_switch = False
self.crystal_blocked = {}
self.entrance_flag = False
def copy(self):
eq = DoorEquation(self.door)
eq.cost = self.cost
for key, doors in self.benefit.items():
eq.benefit[key] = doors.copy()
eq.required = self.required
eq.c_switch = self.c_switch
eq.crystal_blocked = self.crystal_blocked.copy()
return eq
def total_cost(self):
return 0 if self.cost[0] is None else 1
def gross(self, current_access):
key, cost_door = self.cost
if key is None:
# todo: could just be Orange as well (multiple entrance case)
crystal_access = current_access.access_door[None]
else:
crystal_access = None
for match_door, crystal in current_access.outstanding_doors.items():
if hook_from_door(match_door) == key:
if crystal_access is None or current_access._better_crystal(crystal_access, crystal):
crystal_access = crystal
ttl = 0
for key, door_list in self.benefit.items():
for door in door_list:
if door in current_access.outstanding_doors.keys() or door in current_access.proposed_connections.keys():
continue
if door in self.crystal_blocked.keys() and not self.c_switch:
if crystal_access == CrystalBarrier.Either or crystal_access == self.crystal_blocked[door]:
ttl += 1
else:
ttl += 1
return ttl
def profit(self, current_access):
return self.gross(current_access) - self.total_cost()
def neutral(self):
key, door = self.cost
if key is not None and len(self.benefit[key]) <= 0:
return False
return True
def neutral_profit(self):
key, door = self.cost
if key is not None:
if len(self.benefit[key]) < 1:
return False
if len(self.benefit[key]) > 1:
return True
return False
else:
return True
def can_cover_cost(self, current_access):
key, door = self.cost
if key is not None and current_access[key] < 1:
return False
return True
class DungeonAccess:
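    # Hedged reading: tracks what a (possibly split) dungeon piece can reach so
    # far - `access` counts open hooks by type, `door_access` holds the best
    # crystal state known at each door, and `proposed_connections` pairs doors
    # that the resolver has decided to link.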
def __init__(self):
self.access = defaultdict(int)
self.door_access = {} # door -> crystal
self.door_sector_map = {} # door -> original sector
self.outstanding_doors = {}
self.blocked_doors = {}
self.door_access[None] = CrystalBarrier.Orange
self.proposed_connections = {}
self.reached_doors = set()
def can_cover_equation(self, equation):
key, door = equation.cost
if key is None:
return True
return self.access[key] >= 1
def can_pay(self, key):
if key is None:
return True
return self.access[key] >= 1
def adjust_for_equation(self, equation, sector):
if equation.cost[0] is None:
original_crystal = self.door_access[None]
for key, door_list in equation.benefit.items():
self.access[key] += len(door_list)
for door in door_list:
# I can't think of an entrance sector that forces blue
crystal_state = CrystalBarrier.Either if equation.c_switch else original_crystal
if crystal_state == CrystalBarrier.Either:
self.door_access[None] = CrystalBarrier.Either
self.door_access[door] = crystal_state
self.door_sector_map[door] = sector
self.outstanding_doors[door] = crystal_state
self.reached_doors.add(door)
else:
key, door = equation.cost
self.access[key] -= 1
            # find a matching connection
best_door, best_crystal = None, None
for match_door, crystal in self.outstanding_doors.items():
if hook_from_door(match_door) == key:
if best_door is None or self._better_crystal(best_crystal, crystal):
best_door = match_door
best_crystal = crystal
if best_door is None:
raise Exception('Something went terribly wrong I think')
# for match_door, crystal in self.blocked_doors.items():
# if hook_from_door(match_door) == key:
# if best_door is None or self._better_crystal(best_crystal, crystal):
# best_door = match_door
# best_crystal = crystal
self.door_sector_map[door] = sector
self.door_access[door] = best_crystal
self.reached_doors.add(door)
self.proposed_connections[door] = best_door
self.proposed_connections[best_door] = door
if best_door in self.outstanding_doors.keys():
del self.outstanding_doors[best_door]
elif best_door in self.blocked_doors.keys():
del self.blocked_doors[best_door]
self.reached_doors.add(best_door)
# todo: backpropagate crystal access
if equation.c_switch or best_crystal == CrystalBarrier.Either:
# if not equation.door.blocked:
self.door_access[door] = CrystalBarrier.Either
self.door_access[best_door] = CrystalBarrier.Either
queue = deque([best_door, door])
visited = set()
while len(queue) > 0:
next_door = queue.popleft()
visited.add(next_door)
curr_sector = self.door_sector_map[next_door]
next_eq = None
for eq in curr_sector.equations:
if eq.door == next_door:
next_eq = eq
break
if next_eq.entrance_flag:
crystal_state = self.door_access[next_door]
self.door_access[None] = crystal_state
for eq in curr_sector.equations:
cand_door = eq.door
crystal_state = self.door_access[None]
if cand_door in next_eq.crystal_blocked.keys():
crystal_state = next_eq.crystal_blocked[cand_door]
if cand_door not in visited:
self.door_access[cand_door] = crystal_state
if not cand_door.blocked:
if cand_door in self.outstanding_doors.keys():
self.outstanding_doors[cand_door] = crystal_state
if cand_door in self.proposed_connections.keys():
partner_door = self.proposed_connections[cand_door]
self.door_access[partner_door] = crystal_state
if partner_door in self.outstanding_doors.keys():
self.outstanding_doors[partner_door] = crystal_state
if partner_door not in visited:
queue.append(partner_door)
else:
for key, door_list in next_eq.benefit.items():
for cand_door in door_list:
crystal_state = self.door_access[next_door]
if cand_door in next_eq.crystal_blocked.keys():
crystal_state = next_eq.crystal_blocked[cand_door]
if cand_door in self.blocked_doors.keys():
needed_crystal = self.blocked_doors[cand_door]
                                    if meets_crystal_requirement(crystal_state, needed_crystal):
del self.blocked_doors[cand_door]
if cand_door != door:
self.access[key] += 1
self.outstanding_doors[cand_door] = crystal_state
self.door_access[cand_door] = crystal_state
self.reached_doors.add(cand_door)
if cand_door not in visited:
self.door_access[cand_door] = crystal_state
if not cand_door.blocked:
if cand_door in self.outstanding_doors.keys():
self.outstanding_doors[cand_door] = crystal_state
if cand_door in self.proposed_connections.keys():
partner_door = self.proposed_connections[cand_door]
self.door_access[partner_door] = crystal_state
if partner_door in self.outstanding_doors.keys():
self.outstanding_doors[partner_door] = crystal_state
queue.append(cand_door)
queue.append(partner_door)
for key, door_list in equation.benefit.items():
for door in door_list:
crystal_access = self.door_access[best_door]
can_access = True
if door in equation.crystal_blocked.keys():
if crystal_access == CrystalBarrier.Either or crystal_access == equation.crystal_blocked[door]:
crystal_access = equation.crystal_blocked[door]
else:
self.blocked_doors[door] = equation.crystal_blocked[door]
can_access = False
self.door_sector_map[door] = sector
if can_access and door not in self.reached_doors:
self.access[key] += 1
self.door_access[door] = crystal_access
self.outstanding_doors[door] = crystal_access
self.reached_doors.add(door)
def _better_crystal(self, current_champ, contender):
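        # Preference implied below: Either beats Blue beats Orange, since an
        # Either state can traverse both barrier colors.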
if current_champ == CrystalBarrier.Either:
return False
elif contender == CrystalBarrier.Either:
return True
elif current_champ == CrystalBarrier.Blue:
return False
elif contender == CrystalBarrier.Blue:
return True
else:
return False
def identify_branching_issues(dungeon_map, builder_info):
unconnected_builders = {}
for name, builder in dungeon_map.items():
resolved, unreached_doors = check_for_valid_layout(builder, [], builder_info)
if not resolved:
unconnected_builders[name] = builder
for hook, door_list in unreached_doors.items():
builder.unfulfilled[hook] += len(door_list)
return unconnected_builders
def check_for_valid_layout(builder, sector_list, builder_info):
dungeon_entrances, split_dungeon_entrances, c_tuple, world, player = builder_info
if builder.name in split_dungeon_entrances.keys():
try:
temp_builder = DungeonBuilder(builder.name)
for s in sector_list + builder.sectors:
assign_sector_helper(s, temp_builder)
split_list = split_dungeon_entrances[builder.name]
builder.split_dungeon_map = split_dungeon_builder(temp_builder, split_list, builder_info)
builder.valid_proposal = {}
possible_regions = set()
for portal in world.dungeon_portals[player]:
if not portal.destination and portal.name in dungeon_portals[builder.name]:
possible_regions.add(portal.door.entrance.parent_region.name)
if builder.name in dungeon_drops.keys():
possible_regions.update(dungeon_drops[builder.name])
for name, split_build in builder.split_dungeon_map.items():
name_bits = name.split(" ")
orig_name = " ".join(name_bits[:-1])
entrance_regions = split_dungeon_entrances[orig_name][name_bits[-1]]
# todo: this is hardcoded information for random entrances
for sector in split_build.sectors:
match_set = set(sector.region_set()).intersection(possible_regions)
if len(match_set) > 0:
for r_name in match_set:
if r_name not in entrance_regions:
entrance_regions.append(r_name)
# entrance_regions = [x for x in entrance_regions if x not in split_check_entrance_invalid]
proposal = generate_dungeon_find_proposal(split_build, entrance_regions, True, world, player)
# record split proposals
builder.valid_proposal[name] = proposal
builder.exception_list = list(sector_list)
return True, {}
except (GenerationException, NeutralizingException):
builder.split_dungeon_map = None
builder.valid_proposal = None
unreached_doors = resolve_equations(builder, sector_list)
return False, unreached_doors
else:
unreached_doors = resolve_equations(builder, sector_list)
return len(unreached_doors) == 0, unreached_doors
def resolve_equations(builder, sector_list):
unreached_doors = defaultdict(list)
equations = {x: y for x, y in copy_door_equations(builder, sector_list).items() if len(y) > 0}
current_access = {}
    sector_split = {} # sectors that are pinned to a particular split section
if builder.name in split_region_starts.keys():
for name, region_list in split_region_starts[builder.name].items():
current_access[name] = DungeonAccess()
for r_name in region_list:
sector = find_sector(r_name, builder.sectors)
sector_split[sector] = name
else:
current_access[builder.name] = DungeonAccess()
    # resolve all free equations first (they cost nothing and only add access)
free_sector, eq_list, free_eq = find_free_equation(equations)
while free_eq is not None:
if free_sector in sector_split.keys():
access_id = sector_split[free_sector]
access = current_access[access_id]
else:
access_id = next(iter(current_access.keys()))
access = current_access[access_id]
resolve_equation(free_eq, eq_list, free_sector, access_id, access, equations)
free_sector, eq_list, free_eq = find_free_equation(equations)
while len(equations) > 0:
valid_access = next_access(current_access)
eq, eq_list, sector, access, access_id = None, None, None, None, None
if len(valid_access) == 1:
access_id, access = valid_access[0]
eq, eq_list, sector = find_priority_equation(equations, access_id, access)
elif len(valid_access) > 1:
access_id, access = valid_access[0]
eq, eq_list, sector = find_greedy_equation(equations, access_id, access, sector_split)
if eq:
resolve_equation(eq, eq_list, sector, access_id, access, equations)
else:
for sector, eq_list in equations.items():
for eq in eq_list:
unreached_doors[hook_from_door(eq.door)].append(eq.door)
return unreached_doors
valid_access = next_access(current_access)
for access_id, dungeon_access in valid_access:
access = dungeon_access.access
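        # Pair off complementary leftovers: stairs can only match other stairs
        # (hence mod 2), North hooks consume South hooks and vice versa, and
        # likewise East/West; anything remaining has no possible partner.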
access[Hook.Stairs] = access[Hook.Stairs] % 2
ns_leftover = min(access[Hook.North], access[Hook.South])
access[Hook.North] -= ns_leftover
access[Hook.South] -= ns_leftover
ew_leftover = min(access[Hook.West], access[Hook.East])
access[Hook.East] -= ew_leftover
access[Hook.West] -= ew_leftover
if sum(access.values()) > 0:
for hook, num in access.items():
for i in range(num):
unreached_doors[hook].append('placeholder')
return unreached_doors
def next_access(current_access):
valid_ones = [(x, y) for x, y in current_access.items() if sum(y.access.values()) > 0]
valid_ones.sort(key=lambda x: sum(x[1].access.values()))
return valid_ones
# equations with no change to access (check)
# the highest-benefit equations that can be paid for (check)
# 0-benefit required transforms
# 0-benefit transforms (how to pick between these?)
# negative benefit transforms (dead end)
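# Hedged summary of the cascade below: payable equations are filtered against
# outstanding requirements and zero-gross options dropped, then winnowed in
# order by neutrality, best profit, required status, crystal-switch access,
# switch-enabling loop connections, flex affordability, best-in-sector profit,
# ability to enable a wanted equation, and finally the highest cost point.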
def find_priority_equation(equations, access_id, current_access):
flex = calc_flex(equations, current_access)
required = calc_required(equations, current_access)
wanted_candidates = []
best_profit = None
all_candidates = []
local_profit_map = {}
for sector, eq_list in equations.items():
eq_list.sort(key=lambda eq: eq.profit(current_access), reverse=True)
best_local_profit = None
for eq in eq_list:
profit = eq.profit(current_access)
if current_access.can_cover_equation(eq) and (eq.access_id is None or eq.access_id == access_id):
# if eq.neutral_profit() or eq.neutral():
# return eq, eq_list, sector # don't need to compare - just use it now
if best_local_profit is None or profit > best_local_profit:
best_local_profit = profit
all_candidates.append((eq, eq_list, sector))
elif (best_profit is None or profit >= best_profit) and profit > 0:
if best_profit is None or profit > best_profit:
wanted_candidates = [eq]
best_profit = profit
else:
wanted_candidates.append(eq)
local_profit_map[sector] = best_local_profit
filtered_candidates = filter_requirements(all_candidates, equations, required, current_access)
filtered_candidates = [x for x in filtered_candidates if x[0].gross(current_access) > 0]
if len(filtered_candidates) == 0:
filtered_candidates = all_candidates # probably bad things
if len(filtered_candidates) == 0:
return None, None, None # can't pay for anything
if len(filtered_candidates) == 1:
return filtered_candidates[0]
neutral_candidates = [x for x in filtered_candidates if (x[0].neutral_profit() or x[0].neutral()) and x[0].profit(current_access) == local_profit_map[x[2]]]
if len(neutral_candidates) == 0:
neutral_candidates = filtered_candidates
if len(neutral_candidates) == 1:
return neutral_candidates[0]
filtered_candidates = filter_requirements(neutral_candidates, equations, required, current_access)
if len(filtered_candidates) == 0:
filtered_candidates = neutral_candidates
if len(filtered_candidates) == 1:
return filtered_candidates[0]
triplet_candidates = []
best_profit = None
for eq, eq_list, sector in filtered_candidates:
profit = eq.profit(current_access)
if best_profit is None or profit >= best_profit:
if best_profit is None or profit > best_profit:
triplet_candidates = [(eq, eq_list, sector)]
best_profit = profit
else:
triplet_candidates.append((eq, eq_list, sector))
filtered_candidates = filter_requirements(triplet_candidates, equations, required, current_access)
if len(filtered_candidates) == 0:
filtered_candidates = triplet_candidates
if len(filtered_candidates) == 1:
return filtered_candidates[0]
required_candidates = [x for x in filtered_candidates if x[0].required]
if len(required_candidates) == 0:
required_candidates = filtered_candidates
if len(required_candidates) == 1:
return required_candidates[0]
c_switch_candidates = [x for x in required_candidates if x[0].c_switch]
if len(c_switch_candidates) == 0:
c_switch_candidates = required_candidates
if len(c_switch_candidates) == 1:
return c_switch_candidates[0]
loop_candidates = find_enabling_switch_connections(current_access)
if len(loop_candidates) >= 1:
return loop_candidates[0] # just pick one
flexible_candidates = [x for x in c_switch_candidates if x[0].can_cover_cost(flex)]
if len(flexible_candidates) == 0:
flexible_candidates = c_switch_candidates
if len(flexible_candidates) == 1:
return flexible_candidates[0]
good_local_candidates = [x for x in flexible_candidates if local_profit_map[x[2]] == x[0].profit(current_access)]
if len(good_local_candidates) == 0:
good_local_candidates = flexible_candidates
if len(good_local_candidates) == 1:
return good_local_candidates[0]
leads_to_profit = [x for x in good_local_candidates if can_enable_wanted(x[0], wanted_candidates)]
if len(leads_to_profit) == 0:
leads_to_profit = good_local_candidates
if len(leads_to_profit) == 1:
return leads_to_profit[0]
cost_point = {x[0]: find_cost_point(x, current_access) for x in leads_to_profit}
best_point = max(cost_point.values())
cost_point_candidates = [x for x in leads_to_profit if cost_point[x[0]] == best_point]
if len(cost_point_candidates) == 0:
cost_point_candidates = leads_to_profit
return cost_point_candidates[0] # just pick one I guess
def find_enabling_switch_connections(current_access):
triad_list = []
# probably should check for loop/branches in builder at some stage
# - but this could indicate that a loop or branch is necessary
for cand_door, crystal in current_access.outstanding_doors.items():
for blocked_door, req_crystal in current_access.blocked_doors.items():
if hook_from_door(cand_door) == hanger_from_door(blocked_door):
if crystal == CrystalBarrier.Either or crystal == req_crystal:
sector, equation = current_access.door_sector_map[blocked_door], None
for eq in sector.equations:
if eq.door == blocked_door:
equation = eq.copy()
break
if equation:
triad_list.append((equation, [equation], sector))
return triad_list
def find_cost_point(eq_triplet, access):
cost_point = 0
key, cost_door = eq_triplet[0].cost
if cost_door is not None:
cost_point += access.access[key] - 1
return cost_point
def find_greedy_equation(equations, access_id, current_access, sector_split):
all_candidates = []
for sector, eq_list in equations.items():
if sector not in sector_split.keys() or sector_split[sector] == access_id:
eq_list.sort(key=lambda eq: eq.profit(current_access), reverse=True)
for eq in eq_list:
if current_access.can_cover_equation(eq) and (eq.access_id is None or eq.access_id == access_id):
all_candidates.append((eq, eq_list, sector))
if len(all_candidates) == 0:
return None, None, None # can't pay for anything
if len(all_candidates) == 1:
return all_candidates[0]
filtered_candidates = [x for x in all_candidates if x[0].profit(current_access) + 2 >= len(x[2].outstanding_doors)]
if len(filtered_candidates) == 0:
filtered_candidates = all_candidates # terrible! ugly dead ends
if len(filtered_candidates) == 1:
return filtered_candidates[0]
triplet_candidates = []
worst_profit = None
for eq, eq_list, sector in filtered_candidates:
profit = eq.profit(current_access)
if worst_profit is None or profit <= worst_profit:
if worst_profit is None or profit < worst_profit:
triplet_candidates = [(eq, eq_list, sector)]
worst_profit = profit
else:
triplet_candidates.append((eq, eq_list, sector))
if len(triplet_candidates) == 0:
triplet_candidates = filtered_candidates # probably bad things
return triplet_candidates[0] # just pick one?
def calc_required(equations, current_access):
ttl = sum(current_access.access.values())
local_profit_map = {}
for sector, eq_list in equations.items():
best_local_profit = None
for eq in eq_list:
profit = eq.profit(current_access)
if best_local_profit is None or profit > best_local_profit:
best_local_profit = profit
local_profit_map[sector] = best_local_profit
ttl += best_local_profit
if ttl == 0:
new_lists = {}
for sector, eq_list in equations.items():
if len(eq_list) > 1:
rem_list = []
for eq in eq_list:
if eq.profit(current_access) < local_profit_map[sector]:
rem_list.append(eq)
if len(rem_list) > 0:
new_lists[sector] = [x for x in eq_list if x not in rem_list]
for sector, eq_list in new_lists.items():
if len(eq_list) <= 1:
for eq in eq_list:
eq.required = True
equations[sector] = eq_list
required_costs = defaultdict(int)
required_benefits = defaultdict(int)
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.required:
key, door = eq.cost
required_costs[key] += 1
for key, door_list in eq.benefit.items():
required_benefits[key] += len(door_list)
return required_costs, required_benefits
def calc_flex(equations, current_access):
flex_spending = defaultdict(int)
required_costs = defaultdict(int)
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.required:
key, door = eq.cost
required_costs[key] += 1
for key in Hook:
flex_spending[key] = max(0, current_access.access[key]-required_costs[key])
return flex_spending
def filter_requirements(triplet_candidates, equations, required, current_access):
r_costs, r_exits = required
valid_candidates = []
for cand, cand_list, cand_sector in triplet_candidates:
valid = True
if not cand.required and not cand.c_switch:
potential_benefit = defaultdict(int)
benefit_counted = set()
potential_costs = defaultdict(int)
for h_type, benefit in current_access.access.items():
cur_cost = 1 if cand.cost[0] is not None else 0
if benefit - cur_cost > 0:
potential_benefit[h_type] += benefit - cur_cost
for h_type, benefit_list in cand.benefit.items():
potential_benefit[h_type] += len(benefit_list)
for sector, eq_list in equations.items():
if sector == cand_sector:
affected_doors = [d for x in cand.benefit.values() for d in x] + [cand.cost[1]]
adj_list = [x for x in eq_list if x.door not in affected_doors]
else:
adj_list = eq_list
for eq in adj_list:
for h_type, benefit_list in eq.benefit.items():
total_benefit = set(benefit_list) - benefit_counted
potential_benefit[h_type] += len(total_benefit)
benefit_counted.update(benefit_list)
h_type, cost_door = eq.cost
potential_costs[h_type] += 1
for h_type, requirement in r_costs.items():
if requirement > 0 and potential_benefit[h_type] < requirement:
valid = False
break
if valid:
for h_type, requirement in r_exits.items():
if requirement > 0 and potential_costs[h_type] < requirement:
valid = False
break
if valid:
valid_candidates.append((cand, cand_list, cand_sector))
return valid_candidates
def can_enable_wanted(test_eq, wanted_candidates):
for wanted in wanted_candidates:
covered = True
key, cost_door = wanted.cost
if len(test_eq.benefit[key]) < 1:
covered = False
if covered:
return True
return False
def resolve_equation(equation, eq_list, sector, access_id, current_access, equations):
if not current_access.can_pay(equation.cost[0]):
raise GenerationException('Cannot pay for this connection')
current_access.adjust_for_equation(equation, sector)
eq_list.remove(equation)
reached_doors = set(current_access.reached_doors)
reached_doors.update(current_access.blocked_doors.keys())
for r_eq in list(eq_list):
all_benefits_met = r_eq.door in reached_doors
for key in Hook:
fringe_list = [x for x in r_eq.benefit[key] if x not in reached_doors]
r_eq.benefit[key] = fringe_list
if len(fringe_list) > 0:
all_benefits_met = False
if all_benefits_met:
eq_list.remove(r_eq)
if len(eq_list) == 0 and sector in equations.keys():
del equations[sector]
else:
for eq in eq_list:
eq.access_id = access_id
def find_free_equation(equations):
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.total_cost() <= 0:
return sector, eq_list, eq
return None, None, None
def copy_door_equations(builder, sector_list):
equations = {}
for sector in builder.sectors + sector_list:
if sector.equations is None:
# todo: sort equations?
sector.equations = calc_sector_equations(sector)
curr_list = equations[sector] = []
for equation in sector.equations:
curr_list.append(equation.copy())
return equations
def calc_sector_equations(sector):
equations = []
is_entrance = sector.is_entrance_sector() and not sector.destination_entrance
if is_entrance:
flagged_equations = []
for door in sector.outstanding_doors:
equation, flag = calc_door_equation(door, sector, True)
if flag:
flagged_equations.append(equation)
equations.append(equation)
for flagged_equation in flagged_equations:
for equation in equations:
for key, door_list in equation.benefit.items():
if flagged_equation.door in door_list and flagged_equation != equation:
door_list.remove(flagged_equation.door)
else:
for door in sector.outstanding_doors:
equation, flag = calc_door_equation(door, sector, False)
equations.append(equation)
return equations
def calc_door_equation(door, sector, look_for_entrance):
if look_for_entrance and not door.blocked:
flag = sector.is_entrance_sector()
if flag:
eq = DoorEquation(door)
eq.benefit[hook_from_door(door)].append(door)
eq.required = True
eq.c_switch = door.crystal == CrystalBarrier.Either
# exceptions for long entrances ???
# if door.name in ['PoD Dark Alley']:
eq.entrance_flag = True
return eq, flag
eq = DoorEquation(door)
eq.required = door.blocked or door.dead
eq.cost = (hanger_from_door(door), door)
eq.entrance_flag = sector.is_entrance_sector()
if not door.stonewall:
start_region = door.entrance.parent_region
visited = {(start_region, CrystalBarrier.Null)}
queue = deque([(start_region, CrystalBarrier.Null)])
found_events = set()
event_doors = set()
while len(queue) > 0:
region, crystal_barrier = queue.popleft()
if region.crystal_switch and crystal_barrier == CrystalBarrier.Null:
eq.c_switch = True
crystal_barrier = CrystalBarrier.Either
# todo: backtracking from double switch with orange on--
for loc in region.locations:
if loc.name in dungeon_events:
found_events.add(loc.name)
for d in event_doors:
if loc.name == d.req_event:
connect = d.entrance.connected_region
if connect is not None and connect.type == RegionType.Dungeon and valid_crystal(d, crystal_barrier):
cb_flag = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
cb_flag = CrystalBarrier.Null if cb_flag == CrystalBarrier.Either else cb_flag
if (connect, cb_flag) not in visited:
visited.add((connect, cb_flag))
queue.append((connect, cb_flag))
for ext in region.exits:
d = ext.door
if d is not None:
if d.controller is not None:
d = d.controller
if d is not door and d in sector.outstanding_doors and not d.blocked:
eq_list = eq.benefit[hook_from_door(d)]
if d not in eq_list:
eq_list.append(d)
crystal_barrier = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
if crystal_barrier != CrystalBarrier.Null:
if d in eq.crystal_blocked.keys() and eq.crystal_blocked[d] != crystal_barrier:
del eq.crystal_blocked[d]
else:
eq.crystal_blocked[d] = crystal_barrier
elif d.crystal == CrystalBarrier.Null:
if d in eq.crystal_blocked.keys() and eq.crystal_blocked[d] != crystal_barrier:
del eq.crystal_blocked[d]
if d.req_event is not None and d.req_event not in found_events:
event_doors.add(d)
else:
connect = ext.connected_region if ext.door.controller is None else d.entrance.parent_region
if connect is not None and connect.type == RegionType.Dungeon and valid_crystal(d, crystal_barrier):
cb_flag = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
cb_flag = CrystalBarrier.Null if cb_flag == CrystalBarrier.Either else cb_flag
if (connect, cb_flag) not in visited:
visited.add((connect, cb_flag))
queue.append((connect, cb_flag))
if len(eq.benefit) == 0:
eq.required = True
return eq, False
def meets_crystal_requirement(current_crystal, requirement):
if current_crystal == CrystalBarrier.Either:
return True
return current_crystal == requirement
def valid_crystal(door, current_crystal):
if door.crystal in [CrystalBarrier.Null, CrystalBarrier.Either]:
return True
if current_crystal in [CrystalBarrier.Either, CrystalBarrier.Null]:
return True
return door.crystal == current_crystal
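# Hedged truth sketch for the two checks above: Either satisfies any
# requirement, a Null value on either side is permissive in valid_crystal, and
# otherwise the barrier colors must match exactly (Blue with Blue, Orange with
# Orange).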
# common functions - todo: move to a common place
def kth_combination(k, l, r):
if r == 0:
return []
elif len(l) == r:
return l
else:
i = ncr(len(l) - 1, r - 1)
if k < i:
return l[0:1] + kth_combination(k, l[1:], r - 1)
else:
return kth_combination(k - i, l[1:], r)
def ncr(n, r):
if r == 0:
return 1
r = min(r, n - r)
numerator = reduce(op.mul, range(n, n - r, -1), 1)
denominator = reduce(op.mul, range(1, r + 1), 1)
return int(numerator / denominator)
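# Hedged examples of the helpers above (list contents are hypothetical):
#   ncr(4, 2) == 6
#   kth_combination(0, ['a', 'b', 'c'], 2) == ['a', 'b']
#   kth_combination(2, ['a', 'b', 'c'], 2) == ['b', 'c']
# kth_combination walks the C(len(l), r) combinations in lexicographic order
# of the input list without materializing all of them.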
dungeon_boss_sectors = {
'Hyrule Castle': [],
'Eastern Palace': ['Eastern Boss'],
'Desert Palace': ['Desert Boss'],
'Tower of Hera': ['Hera Boss'],
'Agahnims Tower': ['Tower Agahnim 1'],
'Palace of Darkness': ['PoD Boss'],
'Swamp Palace': ['Swamp Boss'],
'Skull Woods': ['Skull Boss'],
'Thieves Town': ['Thieves Blind\'s Cell', 'Thieves Boss'],
'Ice Palace': ['Ice Boss'],
'Misery Mire': ['Mire Boss'],
'Turtle Rock': ['TR Boss'],
'Ganons Tower': ['GT Agahnim 2']
}
default_dungeon_entrances = {
'Hyrule Castle': ['Hyrule Castle Lobby', 'Hyrule Castle West Lobby', 'Hyrule Castle East Lobby', 'Sewers Rat Path',
'Sanctuary'],
'Eastern Palace': ['Eastern Lobby'],
'Desert Palace': ['Desert Back Lobby', 'Desert Main Lobby', 'Desert West Lobby', 'Desert East Lobby'],
'Tower of Hera': ['Hera Lobby'],
'Agahnims Tower': ['Tower Lobby'],
'Palace of Darkness': ['PoD Lobby'],
'Swamp Palace': ['Swamp Lobby'],
'Skull Woods': ['Skull 1 Lobby', 'Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull 2 East Lobby',
'Skull 2 West Lobby', 'Skull Back Drop', 'Skull 3 Lobby'],
'Thieves Town': ['Thieves Lobby'],
'Ice Palace': ['Ice Lobby'],
'Misery Mire': ['Mire Lobby'],
'Turtle Rock': ['TR Main Lobby', 'TR Eye Bridge', 'TR Big Chest Entrance', 'TR Lazy Eyes'],
'Ganons Tower': ['GT Lobby']
}
drop_entrances = {
'Hyrule Castle': ['Sewers Rat Path'],
'Eastern Palace': [],
'Desert Palace': [],
'Tower of Hera': [],
'Agahnims Tower': [],
'Palace of Darkness': [],
'Swamp Palace': [],
'Skull Woods': ['Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull Back Drop'],
'Thieves Town': [],
'Ice Palace': [],
'Misery Mire': [],
'Turtle Rock': [],
'Ganons Tower': []
}
# todo: calculate these for ER - the multi entrance dungeons anyway
dungeon_dead_end_allowance = {
'Hyrule Castle': 6,
'Eastern Palace': 1,
'Desert Palace': 2,
'Tower of Hera': 1,
'Agahnims Tower': 1,
'Palace of Darkness': 1,
'Swamp Palace': 1,
'Skull Woods': 3, # two allowed in skull 1, 1 in skull 3, 0 in skull 2
'Thieves Town': 1,
'Ice Palace': 1,
'Misery Mire': 1,
'Turtle Rock': 2, # this assumes one overworld connection
'Ganons Tower': 1,
'Desert Palace Back': 1,
'Desert Palace Main': 1,
'Skull Woods 1': 0,
'Skull Woods 2': 0,
'Skull Woods 3': 1,
}
drop_entrances_allowance = [
'Sewers Rat Path', 'Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull Back Drop'
]
dead_entrances = [
'TR Big Chest Entrance'
]
split_check_entrance_invalid = [
'Desert East Lobby', 'Skull 2 West Lobby'
]
dungeon_portals = {
'Hyrule Castle': ['Hyrule Castle South', 'Hyrule Castle West', 'Hyrule Castle East', 'Sanctuary'],
'Eastern Palace': ['Eastern'],
'Desert Palace': ['Desert Back', 'Desert South', 'Desert West', 'Desert East'],
'Tower of Hera': ['Hera'],
'Agahnims Tower': ['Agahnims Tower'],
'Palace of Darkness': ['Palace of Darkness'],
'Swamp Palace': ['Swamp'],
'Skull Woods': ['Skull 1', 'Skull 2 East', 'Skull 2 West', 'Skull 3'],
'Thieves Town': ['Thieves Town'],
'Ice Palace': ['Ice'],
'Misery Mire': ['Mire'],
'Turtle Rock': ['Turtle Rock Main', 'Turtle Rock Lazy Eyes', 'Turtle Rock Chest', 'Turtle Rock Eye Bridge'],
'Ganons Tower': ['Ganons Tower']
}
dungeon_drops = {
'Hyrule Castle': ['Sewers Rat Path'],
'Skull Woods': ['Skull Pot Circle', 'Skull Pinball', 'Skull Left Drop', 'Skull Back Drop'],
}
import RaceRandom as random
import collections
import itertools
from collections import defaultdict, deque
from functools import reduce
import logging
import math
import operator as op
import time
from typing import List
from BaseClasses import DoorType, Direction, CrystalBarrier, RegionType, Polarity, PolSlot, flooded_keys, Sector
from BaseClasses import Hook, hook_from_door
from Regions import dungeon_events, flooded_keys_reverse
from Dungeons import dungeon_regions, split_region_starts
from RoomData import DoorKind
class GraphPiece:
def __init__(self):
self.hanger_info = None
self.hanger_crystal = None
self.hooks = {}
self.visited_regions = set()
self.possible_bk_locations = set()
self.pinball_used = False
def pre_validate(builder, entrance_region_names, split_dungeon, world, player):
entrance_regions = convert_regions(entrance_region_names, world, player)
excluded = {}
for region in entrance_regions:
portal = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region == region), None)
if portal and portal.destination:
excluded[region] = None
entrance_regions = [x for x in entrance_regions if x not in excluded.keys()]
proposed_map = {}
doors_to_connect = {}
all_regions = set()
bk_needed = False
bk_special = False
for sector in builder.sectors:
for door in sector.outstanding_doors:
doors_to_connect[door.name] = door
all_regions.update(sector.regions)
bk_needed = bk_needed or determine_if_bk_needed(sector, split_dungeon, world, player)
bk_special = bk_special or check_for_special(sector)
paths = determine_paths_for_dungeon(world, player, all_regions, builder.name)
dungeon, hangers, hooks = gen_dungeon_info(builder.name, builder.sectors, entrance_regions, all_regions,
proposed_map, doors_to_connect, bk_needed, bk_special, world, player)
return check_valid(builder.name, dungeon, hangers, hooks, proposed_map, doors_to_connect, all_regions,
bk_needed, bk_special, paths, entrance_regions, world, player)
def generate_dungeon(builder, entrance_region_names, split_dungeon, world, player):
stonewalls = check_for_stonewalls(builder)
sector = generate_dungeon_main(builder, entrance_region_names, split_dungeon, world, player)
for stonewall in stonewalls:
if not stonewall_valid(stonewall):
builder.pre_open_stonewalls.add(stonewall)
return sector
def check_for_stonewalls(builder):
stonewalls = set()
for sector in builder.sectors:
for door in sector.outstanding_doors:
if door.stonewall:
stonewalls.add(door)
return stonewalls
def generate_dungeon_main(builder, entrance_region_names, split_dungeon, world, player):
if builder.valid_proposal: # we made this earlier in gen, just use it
proposed_map = builder.valid_proposal
else:
proposed_map = generate_dungeon_find_proposal(builder, entrance_region_names, split_dungeon, world, player)
builder.valid_proposal = proposed_map
queue = collections.deque(proposed_map.items())
while len(queue) > 0:
a, b = queue.popleft()
connect_doors(a, b)
queue.remove((b, a))
if len(builder.sectors) == 0:
return Sector()
available_sectors = list(builder.sectors)
master_sector = available_sectors.pop()
for sub_sector in available_sectors:
master_sector.regions.extend(sub_sector.regions)
master_sector.outstanding_doors.clear()
master_sector.r_name_set = None
return master_sector
def generate_dungeon_find_proposal(builder, entrance_region_names, split_dungeon, world, player):
logger = logging.getLogger('')
name = builder.name
entrance_regions = convert_regions(entrance_region_names, world, player)
excluded = {}
for region in entrance_regions:
portal = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region == region), None)
if portal and portal.destination:
excluded[region] = None
entrance_regions = [x for x in entrance_regions if x not in excluded.keys()]
doors_to_connect = {}
all_regions = set()
bk_needed = False
bk_special = False
for sector in builder.sectors:
for door in sector.outstanding_doors:
doors_to_connect[door.name] = door
all_regions.update(sector.regions)
bk_needed = bk_needed or determine_if_bk_needed(sector, split_dungeon, world, player)
bk_special = bk_special or check_for_special(sector)
proposed_map = {}
choices_master = [[]]
depth = 0
dungeon_cache = {}
backtrack = False
itr = 0
attempt = 1
finished = False
# flag if standard and this is hyrule castle
paths = determine_paths_for_dungeon(world, player, all_regions, name)
while not finished:
# what are my choices?
itr += 1
if itr > 1000:
if attempt > 9:
raise GenerationException('Generation taking too long. Ref %s' % name)
proposed_map = {}
choices_master = [[]]
depth = 0
dungeon_cache = {}
backtrack = False
itr = 0
attempt += 1
logger.debug(f'Starting new attempt {attempt}')
if depth not in dungeon_cache.keys():
dungeon, hangers, hooks = gen_dungeon_info(name, builder.sectors, entrance_regions, all_regions, proposed_map,
doors_to_connect, bk_needed, bk_special, world, player)
dungeon_cache[depth] = dungeon, hangers, hooks
valid = check_valid(name, dungeon, hangers, hooks, proposed_map, doors_to_connect, all_regions,
bk_needed, bk_special, paths, entrance_regions, world, player)
else:
dungeon, hangers, hooks = dungeon_cache[depth]
valid = True
if valid:
if len(proposed_map) == len(doors_to_connect):
if dungeon['Origin'].pinball_used:
door = world.get_door('Skull Pinball WS', player)
room = world.get_room(door.roomIndex, player)
if room.doorList[door.doorListPos][1] == DoorKind.Trap:
room.change(door.doorListPos, DoorKind.Normal)
door.trapFlag = 0x0
door.blocked = False
finished = True
continue
prev_choices = choices_master[depth]
# make a choice
hanger, hook = make_a_choice(dungeon, hangers, hooks, prev_choices, name)
if hanger is None:
backtrack = True
else:
logger.debug(' ' * depth + "%d: Linking %s to %s", depth, hanger.name, hook.name)
proposed_map[hanger] = hook
proposed_map[hook] = hanger
last_choice = (hanger, hook)
choices_master[depth].append(last_choice)
depth += 1
choices_master.append([])
else:
backtrack = True
if backtrack:
backtrack = False
choices_master.pop()
dungeon_cache.pop(depth, None)
depth -= 1
if depth < 0:
raise GenerationException('Invalid dungeon. Ref %s' % name)
a, b = choices_master[depth][-1]
logger.debug(' ' * depth + "%d: Rescinding %s, %s", depth, a.name, b.name)
proposed_map.pop(a, None)
proposed_map.pop(b, None)
return proposed_map
def determine_if_bk_needed(sector, split_dungeon, world, player):
if not split_dungeon:
for region in sector.regions:
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None and door.bigKey:
return True
return False
def check_for_special(sector):
for region in sector.regions:
for loc in region.locations:
if loc.forced_big_key():
return True
return False
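# Terminology: an unattached door is a 'hanger' when it needs to be attached to
# something, and a 'hook' when something else can attach to it. A connection pairs
# a hanger with a hook of the complementary type (a north-facing door hangs on a
# south hook; see hang_dir_map further below).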
def gen_dungeon_info(name, available_sectors, entrance_regions, all_regions, proposed_map, valid_doors, bk_needed, bk_special, world, player):
# step 1 create dungeon: Dict<DoorName|Origin, GraphPiece>
dungeon = {}
start = ExplorationState(dungeon=name)
start.big_key_special = bk_special
group_flags, door_map = find_bk_groups(name, available_sectors, proposed_map, bk_special)
bk_flag = False if world.bigkeyshuffle[player] and not bk_special else bk_needed
def exception(d):
return name == 'Skull Woods 2' and d.name == 'Skull Pinball WS'
original_state = extend_reachable_state_improved(entrance_regions, start, proposed_map, all_regions,
valid_doors, bk_flag, world, player, exception)
dungeon['Origin'] = create_graph_piece_from_state(None, original_state, original_state, proposed_map, exception)
either_crystal = True # if all hooks from the origin are either, explore all bits with either
for hook, crystal in dungeon['Origin'].hooks.items():
if crystal != CrystalBarrier.Either:
either_crystal = False
break
init_crystal = CrystalBarrier.Either if either_crystal else CrystalBarrier.Orange
hanger_set = set()
o_state_cache = {}
for sector in available_sectors:
for door in sector.outstanding_doors:
if door not in proposed_map.keys():
hanger_set.add(door)
bk_flag = group_flags[door_map[door]]
parent = door.entrance.parent_region
crystal_start = CrystalBarrier.Either if parent.crystal_switch else init_crystal
init_state = ExplorationState(crystal_start, dungeon=name)
init_state.big_key_special = start.big_key_special
o_state = extend_reachable_state_improved([parent], init_state, proposed_map, all_regions,
valid_doors, bk_flag, world, player, exception)
o_state_cache[door.name] = o_state
piece = create_graph_piece_from_state(door, o_state, o_state, proposed_map, exception)
dungeon[door.name] = piece
check_blue_states(hanger_set, dungeon, o_state_cache, proposed_map, all_regions, valid_doors,
group_flags, door_map, world, player, exception)
    # catalog hooks: Dict<Hook, List<(Door, Crystal, Door)>>
    # and hangers: Dict<Hook, List<Door>>
avail_hooks = defaultdict(list)
hangers = defaultdict(list)
for key, piece in dungeon.items():
door_hang = piece.hanger_info
if door_hang is not None:
hanger = hanger_from_door(door_hang)
hangers[hanger].append(door_hang)
for door, crystal in piece.hooks.items():
hook = hook_from_door(door)
avail_hooks[hook].append((door, crystal, door_hang))
    # thin out invalid hangers
winnow_hangers(hangers, avail_hooks)
return dungeon, hangers, avail_hooks
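# Group sectors already linked by the proposed map into connected components and
# flag each group that contains a forced big key, so big-key-locked doors can be
# handled per component when the big key is a special (non-chest) placement.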
def find_bk_groups(name, available_sectors, proposed_map, bk_special):
groups = {}
door_ids = {}
gid = 1
for sector in available_sectors:
if bk_special:
my_gid = None
for door in sector.outstanding_doors:
if door in proposed_map and proposed_map[door] in door_ids:
                    if my_gid:
                        merge_gid = door_ids[proposed_map[door]]
                        for d in door_ids.keys():
                            if door_ids[d] == merge_gid:
                                door_ids[d] = my_gid
groups[my_gid] = groups[my_gid] or groups[merge_gid]
else:
my_gid = door_ids[proposed_map[door]]
if not my_gid:
my_gid = gid
gid += 1
for door in sector.outstanding_doors:
door_ids[door] = my_gid
if my_gid not in groups.keys():
groups[my_gid] = False
for region in sector.regions:
for loc in region.locations:
if loc.forced_item and loc.item.bigkey and name in loc.item.name:
groups[my_gid] = True
else:
for door in sector.outstanding_doors:
door_ids[door] = gid
groups[gid] = False
return groups, door_ids
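# Fixpoint propagation of blue-crystal accessibility: whenever a door becomes
# reachable in the Blue crystal state, re-explore it, and repeat until no new blue
# hooks or hangers appear.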
def check_blue_states(hanger_set, dungeon, o_state_cache, proposed_map, all_regions, valid_doors, group_flags, door_map,
world, player, exception):
not_blue = set()
not_blue.update(hanger_set)
doors_to_check = set()
doors_to_check.update(hanger_set) # doors to check, check everything on first pass
blue_hooks = []
blue_hangers = []
new_blues = True
while new_blues:
new_blues = False
for door in doors_to_check:
piece = dungeon[door.name]
for hook, crystal in piece.hooks.items():
if crystal != CrystalBarrier.Orange:
h_type = hook_from_door(hook)
if h_type not in blue_hooks:
new_blues = True
blue_hooks.append(h_type)
if piece.hanger_crystal == CrystalBarrier.Either:
h_type = hanger_from_door(piece.hanger_info)
if h_type not in blue_hangers:
new_blues = True
blue_hangers.append(h_type)
doors_to_check = set()
for door in not_blue: # am I now blue?
hang_type = hanger_from_door(door) # am I hangable on a hook?
hook_type = hook_from_door(door) # am I hookable onto a hanger?
if (hang_type in blue_hooks and not door.stonewall) or hook_type in blue_hangers:
bk_flag = group_flags[door_map[door]]
explore_blue_state(door, dungeon, o_state_cache[door.name], proposed_map, all_regions, valid_doors,
bk_flag, world, player, exception)
doors_to_check.add(door)
not_blue.difference_update(doors_to_check)
def explore_blue_state(door, dungeon, o_state, proposed_map, all_regions, valid_doors, bk_flag, world, player, exception):
parent = door.entrance.parent_region
blue_start = ExplorationState(CrystalBarrier.Blue, o_state.dungeon)
blue_start.big_key_special = o_state.big_key_special
b_state = extend_reachable_state_improved([parent], blue_start, proposed_map, all_regions, valid_doors, bk_flag,
world, player, exception)
dungeon[door.name] = create_graph_piece_from_state(door, o_state, b_state, proposed_map, exception)
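# Pick the next hanger/hook pair to try. Hanger preference order: repeat the
# previous choice's hanger at this depth, then hangers that are not themselves
# hooks, then hookable hangers of the scarcest type (to surface problems early),
# with hooks on the Origin piece considered last.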
def make_a_choice(dungeon, hangers, avail_hooks, prev_choices, name):
# choose a hanger
all_hooks = {}
origin = dungeon['Origin']
for key in avail_hooks.keys():
for hstuff in avail_hooks[key]:
all_hooks[hstuff[0]] = None
candidate_hangers = []
for key in hangers.keys():
candidate_hangers.extend(hangers[key])
candidate_hangers.sort(key=lambda x: x.name) # sorting to create predictable seeds
random.shuffle(candidate_hangers) # randomize if equal preference
stage_2_hangers = []
if len(prev_choices) > 0:
prev_hanger = prev_choices[0][0]
if prev_hanger in candidate_hangers:
stage_2_hangers.append(prev_hanger)
candidate_hangers.remove(prev_hanger)
hookable_hangers = collections.deque()
queue = collections.deque(candidate_hangers)
while len(queue) > 0:
c_hang = queue.popleft()
if c_hang in all_hooks.keys():
hookable_hangers.append(c_hang)
else:
stage_2_hangers.append(c_hang) # prefer hangers that are not hooks
    # todo: prefer hangers with fewer hooks at some point? not sure about this
    # this prefers hangers of the fewest type - to catch problems fast
hookable_hangers = sorted(hookable_hangers, key=lambda door: len(hangers[hanger_from_door(door)]), reverse=True)
origin_hangers = []
while len(hookable_hangers) > 0:
c_hang = hookable_hangers.pop()
if c_hang in origin.hooks.keys():
origin_hangers.append(c_hang)
else:
stage_2_hangers.append(c_hang) # prefer hangers that are not hooks on the 'origin'
stage_2_hangers.extend(origin_hangers)
hook = None
next_hanger = None
while hook is None:
if len(stage_2_hangers) == 0:
return None, None
next_hanger = stage_2_hangers.pop(0)
next_hanger_type = hanger_from_door(next_hanger)
hook_candidates = []
for door, crystal, orig_hang in avail_hooks[next_hanger_type]:
if filter_choices(next_hanger, door, orig_hang, prev_choices, hook_candidates):
hook_candidates.append(door)
if len(hook_candidates) > 0:
hook_candidates.sort(key=lambda x: x.name) # sort for deterministic seeds
hook = random.choice(tuple(hook_candidates))
elif name == 'Skull Woods 2' and next_hanger.name == 'Skull Pinball WS':
continue
else:
return None, None
return next_hanger, hook
def filter_choices(next_hanger, door, orig_hang, prev_choices, hook_candidates):
if (next_hanger, door) in prev_choices or (door, next_hanger) in prev_choices:
return False
return next_hanger != door and orig_hang != next_hanger and door not in hook_candidates
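# Cheap plausibility checks on a partial proposal; returning False prunes this
# search branch early instead of waiting for a full dead end.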
def check_valid(name, dungeon, hangers, hooks, proposed_map, doors_to_connect, all_regions,
bk_needed, bk_special, paths, entrance_regions, world, player):
# evaluate if everything is still plausible
# only origin is left in the dungeon and not everything is connected
if len(dungeon.keys()) <= 1 and len(proposed_map.keys()) < len(doors_to_connect):
return False
# origin has no more hooks, but not all doors have been proposed
if not world.bigkeyshuffle[player]:
possible_bks = len(dungeon['Origin'].possible_bk_locations)
true_origin_hooks = [x for x in dungeon['Origin'].hooks.keys() if not x.bigKey or possible_bks > 0 or not bk_needed]
if len(true_origin_hooks) == 0 and len(proposed_map.keys()) < len(doors_to_connect):
return False
if len(true_origin_hooks) == 0 and bk_needed and possible_bks == 0 and len(proposed_map.keys()) == len(doors_to_connect):
return False
for key in hangers.keys():
if len(hooks[key]) > 0 and len(hangers[key]) == 0:
return False
    # todo: stonewall - check that no hook-only door is left without a matching hanger
must_hang = defaultdict(list)
all_hooks = set()
for key in hooks.keys():
for hook in hooks[key]:
all_hooks.add(hook[0])
for key in hangers.keys():
for hanger in hangers[key]:
if hanger not in all_hooks:
must_hang[key].append(hanger)
for key in must_hang.keys():
if len(must_hang[key]) > len(hooks[key]):
return False
outstanding_doors = defaultdict(list)
for d in doors_to_connect.values():
if d not in proposed_map.keys():
outstanding_doors[hook_from_door(d)].append(d)
for key in outstanding_doors.keys():
opp_key = opposite_h_type(key)
if len(outstanding_doors[key]) > 0 and len(hangers[key]) == 0 and len(hooks[opp_key]) == 0:
return False
all_visited = set()
bk_possible = not bk_needed or (world.bigkeyshuffle[player] and not bk_special)
for piece in dungeon.values():
all_visited.update(piece.visited_regions)
if not bk_possible and len(piece.possible_bk_locations) > 0:
bk_possible = True
if len(all_regions.difference(all_visited)) > 0:
return False
if not bk_possible:
return False
if not valid_paths(name, paths, entrance_regions, doors_to_connect, all_regions, proposed_map,
bk_needed, bk_special, world, player):
return False
new_hangers_found = True
accessible_hook_types = []
hanger_matching = set()
all_hangers = set()
origin_hooks = set(dungeon['Origin'].hooks.keys())
for door_hook in origin_hooks:
h_type = hook_from_door(door_hook)
if h_type not in accessible_hook_types:
accessible_hook_types.append(h_type)
while new_hangers_found:
new_hangers_found = False
for hanger_set in hangers.values():
for hanger in hanger_set:
all_hangers.add(hanger)
h_type = hanger_from_door(hanger)
if (h_type in accessible_hook_types or hanger in origin_hooks) and hanger not in hanger_matching:
new_hangers_found = True
hanger_matching.add(hanger)
matching_hooks = dungeon[hanger.name].hooks.keys()
origin_hooks.update(matching_hooks)
for door_hook in matching_hooks:
new_h_type = hook_from_door(door_hook)
if new_h_type not in accessible_hook_types:
accessible_hook_types.append(new_h_type)
return len(all_hangers.difference(hanger_matching)) == 0
def valid_paths(name, paths, entrance_regions, valid_doors, all_regions, proposed_map,
bk_needed, bk_special, world, player):
for path in paths:
if type(path) is tuple:
target = path[1]
start_regions = []
for region in all_regions:
if path[0] == region.name:
start_regions.append(region)
break
else:
target = path
start_regions = entrance_regions
if not valid_path(name, start_regions, target, valid_doors, proposed_map, all_regions,
bk_needed, bk_special, world, player):
return False
return True
def valid_path(name, starting_regions, target, valid_doors, proposed_map, all_regions,
bk_needed, bk_special, world, player):
target_regions = set()
if type(target) is not list:
for region in all_regions:
if target == region.name:
target_regions.add(region)
break
else:
for region in all_regions:
if region.name in target:
target_regions.add(region)
start = ExplorationState(dungeon=name)
start.big_key_special = bk_special
bk_flag = False if world.bigkeyshuffle[player] and not bk_special else bk_needed
def exception(d):
return name == 'Skull Woods 2' and d.name == 'Skull Pinball WS'
original_state = extend_reachable_state_improved(starting_regions, start, proposed_map, all_regions,
valid_doors, bk_flag, world, player, exception)
for exp_door in original_state.unattached_doors:
if not exp_door.door.blocked:
return True
for target in target_regions:
if original_state.visited_at_all(target):
return True
return False
def determine_required_paths(world, player):
paths = {}
for name, builder in world.dungeon_layouts[player].items():
all_regions = builder.master_sector.regions
paths[name] = determine_paths_for_dungeon(world, player, all_regions, name)
return paths
boss_path_checks = ['Eastern Boss', 'Desert Boss', 'Hera Boss', 'Tower Agahnim 1', 'PoD Boss', 'Swamp Boss',
'Skull Boss', 'Ice Boss', 'Mire Boss', 'TR Boss', 'GT Agahnim 2']
# pinball is allowed to orphan you
drop_path_checks = ['Skull Pot Circle', 'Skull Left Drop', 'Skull Back Drop', 'Sewers Rat Path']
def determine_paths_for_dungeon(world, player, all_regions, name):
all_r_names = set(x.name for x in all_regions)
paths = []
non_hole_portals = []
for portal in world.dungeon_portals[player]:
if portal.door.entrance.parent_region in all_regions:
non_hole_portals.append(portal.door.entrance.parent_region.name)
if portal.destination:
paths.append(portal.door.entrance.parent_region.name)
if world.mode[player] == 'standard' and name == 'Hyrule Castle':
paths.append('Hyrule Dungeon Cellblock')
paths.append(('Hyrule Dungeon Cellblock', 'Sanctuary'))
if world.doorShuffle[player] in ['basic'] and name == 'Thieves Town':
paths.append('Thieves Attic Window')
elif 'Thieves Attic Window' in all_r_names:
paths.append('Thieves Attic Window')
for boss in boss_path_checks:
if boss in all_r_names:
paths.append(boss)
if 'Thieves Boss' in all_r_names:
paths.append('Thieves Boss')
paths.append(('Thieves Blind\'s Cell', 'Thieves Boss'))
for drop_check in drop_path_checks:
if drop_check in all_r_names:
paths.append((drop_check, non_hole_portals))
return paths
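# Drop hangers that have no usable hook: either no hooks of the matching type
# exist, or every such hook sits on the piece hung by this very door, which would
# only form an isolated loop.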
def winnow_hangers(hangers, hooks):
removal_info = []
for hanger, door_set in hangers.items():
for door in door_set:
hook_set = hooks[hanger]
if len(hook_set) == 0:
removal_info.append((hanger, door))
else:
found_valid = False
for door_hook, crystal, orig_hanger in hook_set:
if orig_hanger != door:
found_valid = True
break
if not found_valid:
removal_info.append((hanger, door))
for hanger, door in removal_info:
hangers[hanger].remove(door)
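# Walk backwards through entrances from the far side of a stonewall door. Reaching
# the stonewall's own front region means a guaranteed loop, and reaching a
# non-dungeon region means a player could enter from outside and get stuck; either
# way the stonewall is unsafe (returns False) and will be pre-opened by the caller.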
def stonewall_valid(stonewall):
bad_door = stonewall.dest
if bad_door.blocked:
return True
loop_region = stonewall.entrance.parent_region
start_regions = [bad_door.entrance.parent_region]
if bad_door.dependents:
for dep in bad_door.dependents:
start_regions.append(dep.entrance.parent_region)
queue = deque(start_regions)
visited = set(start_regions)
while len(queue) > 0:
region = queue.popleft()
if region == loop_region:
return False # guaranteed loop
possible_entrances = list(region.entrances)
for entrance in possible_entrances:
parent = entrance.parent_region
if parent.type != RegionType.Dungeon:
return False # you can get stuck from an entrance
else:
door = entrance.door
if (door is None or (door != stonewall and not door.blocked)) and parent not in visited:
visited.add(parent)
queue.append(parent)
# we didn't find anything bad
return True
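# Merge the Orange-state and Blue-state explorations of a door into a GraphPiece:
# unattached doors become hooks annotated with their crystal requirement, and the
# piece records all visited regions plus potential big key locations from both states.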
def create_graph_piece_from_state(door, o_state, b_state, proposed_map, exception):
graph_piece = GraphPiece()
all_unattached = {}
for exp_d in o_state.unattached_doors:
all_unattached[exp_d.door] = exp_d.crystal
for exp_d in b_state.unattached_doors:
d = exp_d.door
if d in all_unattached.keys():
if all_unattached[d] != exp_d.crystal:
if all_unattached[d] == CrystalBarrier.Orange and exp_d.crystal == CrystalBarrier.Blue:
all_unattached[d] = CrystalBarrier.Null
elif all_unattached[d] == CrystalBarrier.Blue and exp_d.crystal == CrystalBarrier.Orange:
logging.getLogger('').warning('Mismatched state @ %s (o:%s b:%s)', d.name, all_unattached[d],
exp_d.crystal)
elif all_unattached[d] == CrystalBarrier.Either:
all_unattached[d] = exp_d.crystal
else:
all_unattached[exp_d.door] = exp_d.crystal
h_crystal = door.crystal if door is not None else None
for d, crystal in all_unattached.items():
        if (door is None or d != door) and (not d.blocked or exception(d)) and d not in proposed_map.keys():
graph_piece.hooks[d] = crystal
if d == door:
h_crystal = crystal
graph_piece.hanger_info = door
graph_piece.hanger_crystal = h_crystal
graph_piece.visited_regions.update(o_state.visited_blue)
graph_piece.visited_regions.update(o_state.visited_orange)
graph_piece.visited_regions.update(b_state.visited_blue)
graph_piece.visited_regions.update(b_state.visited_orange)
graph_piece.possible_bk_locations.update(filter_for_potential_bk_locations(o_state.bk_found))
graph_piece.possible_bk_locations.update(filter_for_potential_bk_locations(b_state.bk_found))
graph_piece.pinball_used = o_state.pinball_used or b_state.pinball_used
return graph_piece
def filter_for_potential_bk_locations(locations):
return [x for x in locations if
'- Big Chest' not in x.name and '- Prize' not in x.name and x.name not in dungeon_events
and not x.forced_item and x.name not in ['Agahnim 1', 'Agahnim 2']]
type_map = {
Hook.Stairs: Hook.Stairs,
Hook.North: Hook.South,
Hook.South: Hook.North,
Hook.West: Hook.East,
Hook.East: Hook.West
}
def opposite_h_type(h_type) -> Hook:
return type_map[h_type]
hang_dir_map = {
Direction.North: Hook.South,
Direction.South: Hook.North,
Direction.West: Hook.East,
Direction.East: Hook.West,
}
def hanger_from_door(door):
if door.type == DoorType.SpiralStairs:
return Hook.Stairs
if door.type in [DoorType.Normal, DoorType.Open, DoorType.StraightStairs, DoorType.Ladder]:
return hang_dir_map[door.direction]
return None
def connect_doors(a, b):
if a.type in [DoorType.Hole, DoorType.Warp, DoorType.Interior, DoorType.Logical]:
return
if a.type in [DoorType.Normal, DoorType.SpiralStairs, DoorType.Open, DoorType.StraightStairs, DoorType.Ladder]:
if a.blocked:
connect_one_way(b.entrance, a.entrance)
elif b.blocked:
connect_one_way(a.entrance, b.entrance)
else:
connect_two_way(a.entrance, b.entrance)
dep_doors, target = [], None
if len(a.dependents) > 0:
dep_doors, target = a.dependents, b
elif len(b.dependents) > 0:
dep_doors, target = b.dependents, a
if target is not None:
target_region = target.entrance.parent_region
for dep in dep_doors:
connect_simple_door(dep, target_region)
return
raise RuntimeError('Unknown door type ' + a.type.name)
def connect_two_way(entrance, ext):
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if ext.connected_region is not None:
ext.connected_region.entrances.remove(ext)
entrance.connect(ext.parent_region)
ext.connect(entrance.parent_region)
if entrance.parent_region.dungeon:
ext.parent_region.dungeon = entrance.parent_region.dungeon
x = entrance.door
y = ext.door
if x is not None:
x.dest = y
if y is not None:
y.dest = x
def connect_one_way(entrance, ext):
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if ext.connected_region is not None:
ext.connected_region.entrances.remove(ext)
entrance.connect(ext.parent_region)
if entrance.parent_region.dungeon:
ext.parent_region.dungeon = entrance.parent_region.dungeon
x = entrance.door
y = ext.door
if x is not None:
x.dest = y
if y is not None:
y.dest = x
def connect_simple_door(exit_door, region):
exit_door.entrance.connect(region)
exit_door.dest = region
special_big_key_doors = ['Hyrule Dungeon Cellblock Door', "Thieves Blind's Cell Door"]
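# Tracks reachability during exploration, keeping separate visited lists per crystal
# barrier state (orange/blue) plus doors that are available, gated behind events,
# key-locked, or still unattached.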
class ExplorationState(object):
def __init__(self, init_crystal=CrystalBarrier.Orange, dungeon=None):
self.unattached_doors = []
self.avail_doors = []
self.event_doors = []
self.visited_orange = []
self.visited_blue = []
self.events = set()
self.crystal = init_crystal
# key region stuff
self.door_krs = {}
# key validation stuff
self.small_doors = []
self.big_doors = []
self.opened_doors = []
self.big_key_opened = False
self.big_key_special = False
self.found_locations = []
self.ttl_locations = 0
self.used_locations = 0
self.key_locations = 0
self.used_smalls = 0
self.bk_found = set()
self.non_door_entrances = []
self.dungeon = dungeon
self.pinball_used = False
def copy(self):
ret = ExplorationState(dungeon=self.dungeon)
ret.unattached_doors = list(self.unattached_doors)
ret.avail_doors = list(self.avail_doors)
ret.event_doors = list(self.event_doors)
ret.visited_orange = list(self.visited_orange)
ret.visited_blue = list(self.visited_blue)
ret.events = set(self.events)
ret.crystal = self.crystal
ret.door_krs = self.door_krs.copy()
ret.small_doors = list(self.small_doors)
ret.big_doors = list(self.big_doors)
ret.opened_doors = list(self.opened_doors)
ret.big_key_opened = self.big_key_opened
ret.big_key_special = self.big_key_special
ret.ttl_locations = self.ttl_locations
ret.key_locations = self.key_locations
ret.used_locations = self.used_locations
ret.used_smalls = self.used_smalls
ret.found_locations = list(self.found_locations)
ret.bk_found = set(self.bk_found)
ret.non_door_entrances = list(self.non_door_entrances)
ret.dungeon = self.dungeon
ret.pinball_used = self.pinball_used
return ret
def next_avail_door(self):
self.avail_doors.sort(key=lambda x: 0 if x.flag else 1 if x.door.bigKey else 2)
exp_door = self.avail_doors.pop()
self.crystal = exp_door.crystal
return exp_door
def visit_region(self, region, key_region=None, key_checks=False, bk_Flag=False):
if self.crystal == CrystalBarrier.Either:
if region not in self.visited_blue:
self.visited_blue.append(region)
if region not in self.visited_orange:
self.visited_orange.append(region)
elif self.crystal == CrystalBarrier.Orange:
self.visited_orange.append(region)
elif self.crystal == CrystalBarrier.Blue:
self.visited_blue.append(region)
if region.type == RegionType.Dungeon:
for location in region.locations:
if key_checks and location not in self.found_locations:
if location.forced_item and 'Small Key' in location.item.name:
self.key_locations += 1
if location.name not in dungeon_events and '- Prize' not in location.name and location.name not in ['Agahnim 1', 'Agahnim 2']:
self.ttl_locations += 1
if location not in self.found_locations: # todo: special logic for TT Boss?
self.found_locations.append(location)
if not bk_Flag:
self.bk_found.add(location)
if location.name in dungeon_events and location.name not in self.events:
if self.flooded_key_check(location):
self.perform_event(location.name, key_region)
if location.name in flooded_keys_reverse.keys() and self.location_found(
flooded_keys_reverse[location.name]):
self.perform_event(flooded_keys_reverse[location.name], key_region)
def flooded_key_check(self, location):
if location.name not in flooded_keys.keys():
return True
return flooded_keys[location.name] in [x.name for x in self.found_locations]
def location_found(self, location_name):
for l in self.found_locations:
if l.name == location_name:
return True
return False
def perform_event(self, location_name, key_region):
self.events.add(location_name)
queue = collections.deque(self.event_doors)
while len(queue) > 0:
exp_door = queue.popleft()
if exp_door.door.req_event == location_name:
self.avail_doors.append(exp_door)
self.event_doors.remove(exp_door)
if key_region is not None:
d_name = exp_door.door.name
if d_name not in self.door_krs.keys():
self.door_krs[d_name] = key_region
def add_all_entrance_doors_check_unattached(self, region, world, player):
door_list = [x for x in get_doors(world, region, player) if x.type in [DoorType.Normal, DoorType.SpiralStairs]]
door_list.extend(get_entrance_doors(world, region, player))
for door in door_list:
if self.can_traverse(door):
if door.dest is None and not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors)
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
for entrance in region.entrances:
door = world.check_for_door(entrance.name, player)
if door is None:
self.non_door_entrances.append(entrance)
def add_all_doors_check_unattached(self, region, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.controller is not None:
door = door.controller
if door.dest is None and not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors)
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
def add_all_doors_check_proposed(self, region, proposed_map, valid_doors, flag, world, player, exception):
for door in get_doors(world, region, player):
if door.blocked and exception(door):
self.pinball_used = True
if self.can_traverse(door, exception):
if door.controller is not None:
door = door.controller
if door.dest is None and door not in proposed_map.keys() and door.name in valid_doors.keys():
if not self.in_door_list_ic(door, self.unattached_doors):
self.append_door_to_list(door, self.unattached_doors, flag)
else:
other = self.find_door_in_list(door, self.unattached_doors)
if self.crystal != other.crystal:
other.crystal = CrystalBarrier.Either
elif door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors, flag)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors, flag)
def add_all_doors_check_key_region(self, region, key_region, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.req_event is not None and door.req_event not in self.events and not self.in_door_list(door,
self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
if door.name not in self.door_krs.keys():
self.door_krs[door.name] = key_region
else:
if door.name not in self.door_krs.keys():
self.door_krs[door.name] = key_region
def add_all_doors_check_keys(self, region, key_door_proposal, world, player):
for door in get_doors(world, region, player):
if self.can_traverse(door):
if door.controller:
door = door.controller
if door in key_door_proposal and door not in self.opened_doors:
if not self.in_door_list(door, self.small_doors):
self.append_door_to_list(door, self.small_doors)
elif (door.bigKey or door.name in special_big_key_doors) and not self.big_key_opened:
if not self.in_door_list(door, self.big_doors):
self.append_door_to_list(door, self.big_doors)
elif door.req_event is not None and door.req_event not in self.events:
if not self.in_door_list(door, self.event_doors):
self.append_door_to_list(door, self.event_doors)
elif not self.in_door_list(door, self.avail_doors):
self.append_door_to_list(door, self.avail_doors)
def visited(self, region):
if self.crystal == CrystalBarrier.Either:
return region in self.visited_blue and region in self.visited_orange
elif self.crystal == CrystalBarrier.Orange:
return region in self.visited_orange
elif self.crystal == CrystalBarrier.Blue:
return region in self.visited_blue
return False
def visited_at_all(self, region):
return region in self.visited_blue or region in self.visited_orange
def found_forced_bk(self):
for location in self.found_locations:
if location.forced_big_key():
return True
return False
def can_traverse(self, door, exception=None):
if door.blocked:
return exception(door) if exception else False
if door.crystal not in [CrystalBarrier.Null, CrystalBarrier.Either]:
return self.crystal == CrystalBarrier.Either or door.crystal == self.crystal
return True
def count_locations_exclude_specials(self):
cnt = 0
for loc in self.found_locations:
if '- Big Chest' not in loc.name and '- Prize' not in loc.name and loc.name not in dungeon_events and not loc.forced_item:
cnt += 1
return cnt
def validate(self, door, region, world, player):
return self.can_traverse(door) and not self.visited(region) and valid_region_to_explore(region, self.dungeon,
world, player)
def in_door_list(self, door, door_list):
for d in door_list:
if d.door == door and d.crystal == self.crystal:
return True
return False
@staticmethod
def in_door_list_ic(door, door_list):
for d in door_list:
if d.door == door:
return True
return False
@staticmethod
def find_door_in_list(door, door_list):
for d in door_list:
if d.door == door:
return d
return None
def append_door_to_list(self, door, door_list, flag=False):
if door.crystal == CrystalBarrier.Null:
door_list.append(ExplorableDoor(door, self.crystal, flag))
else:
door_list.append(ExplorableDoor(door, door.crystal, flag))
def key_door_sort(self, d):
if d.door.smallKey:
if d.door in self.opened_doors:
return 1
else:
return 0
return 2
class ExplorableDoor(object):
def __init__(self, door, crystal, flag):
self.door = door
self.crystal = crystal
self.flag = flag
def __str__(self):
return str(self.__unicode__())
def __unicode__(self):
return '%s (%s)' % (self.door.name, self.crystal.name)
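# Flood-fill the regions reachable from the given start regions, honoring the
# proposed door map, crystal barriers, and (when bk_flag is set) whether the big
# key can actually be obtained before opening big key doors.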
def extend_reachable_state_improved(search_regions, state, proposed_map, all_regions, valid_doors, bk_flag, world, player, exception):
local_state = state.copy()
for region in search_regions:
local_state.visit_region(region)
local_state.add_all_doors_check_proposed(region, proposed_map, valid_doors, False, world, player, exception)
while len(local_state.avail_doors) > 0:
explorable_door = local_state.next_avail_door()
if explorable_door.door.bigKey:
if bk_flag:
big_not_found = not special_big_key_found(local_state) if local_state.big_key_special else local_state.count_locations_exclude_specials() == 0
if big_not_found:
continue # we can't open this door
if explorable_door.door in proposed_map:
connect_region = world.get_entrance(proposed_map[explorable_door.door].name, player).parent_region
else:
connect_region = world.get_entrance(explorable_door.door.name, player).connected_region
if connect_region is not None:
if valid_region_to_explore_in_regions(connect_region, all_regions, world, player) and not local_state.visited(
connect_region):
flag = explorable_door.flag or explorable_door.door.bigKey
local_state.visit_region(connect_region, bk_Flag=flag)
local_state.add_all_doors_check_proposed(connect_region, proposed_map, valid_doors, flag, world, player, exception)
return local_state
def special_big_key_found(state):
for location in state.found_locations:
if location.forced_item and location.forced_item.bigkey:
return True
return False
def valid_region_to_explore_in_regions(region, all_regions, world, player):
if region is None:
return False
return (region.type == RegionType.Dungeon and region in all_regions)\
or region.name in world.inaccessible_regions[player]\
or (region.name == 'Hyrule Castle Ledge' and world.mode[player] == 'standard')
def valid_region_to_explore(region, name, world, player):
if region is None:
return False
return (region.type == RegionType.Dungeon and region.dungeon.name in name)\
or region.name in world.inaccessible_regions[player]\
or (region.name == 'Hyrule Castle Ledge' and world.mode[player] == 'standard')
def get_doors(world, region, player):
res = []
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None:
res.append(door)
return res
def get_dungeon_doors(region, world, player):
res = []
for ext in region.exits:
door = world.check_for_door(ext.name, player)
if door is not None and ext.parent_region.type == RegionType.Dungeon:
res.append(door)
return res
def get_entrance_doors(world, region, player):
res = []
for ext in region.entrances:
door = world.check_for_door(ext.name, player)
if door is not None:
res.append(door)
return res
def convert_regions(region_names, world, player):
region_list = []
for name in region_names:
region_list.append(world.get_region(name, player))
return region_list
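# Accumulates per-dungeon state during sector assignment: assigned sectors,
# location and key-drop counts, crystal switch requirements, branch and dead-end
# bookkeeping, and connection supply/demand per hook type.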
class DungeonBuilder(object):
def __init__(self, name):
self.name = name
self.sectors = []
self.location_cnt = 0
self.key_drop_cnt = 0
self.bk_required = False
self.bk_provided = False
self.c_switch_required = False
self.c_switch_present = False
self.c_locked = False
self.dead_ends = 0
self.branches = 0
self.forced_loops = 0
self.total_conn_lack = 0
self.conn_needed = defaultdict(int)
self.conn_supplied = defaultdict(int)
self.conn_balance = defaultdict(int)
self.mag_needed = {}
self.unfulfilled = defaultdict(int)
self.all_entrances = None
self.entrance_list = None
self.layout_starts = None
self.master_sector = None
self.path_entrances = None
self.split_flag = False
self.pre_open_stonewalls = set()
self.candidates = None
self.key_doors_num = None
self.combo_size = None
self.flex = 0
self.key_door_proposal = None
self.allowance = None
if 'Stonewall' in name:
self.allowance = 1
elif 'Prewall' in name:
orig_name = name[:-8]
if orig_name in dungeon_dead_end_allowance.keys():
self.allowance = dungeon_dead_end_allowance[orig_name]
if self.allowance is None:
self.allowance = 1
self.valid_proposal = None
self.split_dungeon_map = None
self.exception_list = []
def polarity_complement(self):
pol = Polarity()
for sector in self.sectors:
pol += sector.polarity()
return pol.complement()
def polarity(self):
pol = Polarity()
for sector in self.sectors:
pol += sector.polarity()
return pol
def __str__(self):
return str(self.__unicode__())
def __unicode__(self):
return '%s' % self.name
def simple_dungeon_builder(name, sector_list):
define_sector_features(sector_list)
builder = DungeonBuilder(name)
dummy_pool = dict.fromkeys(sector_list)
global_pole = GlobalPolarity(dummy_pool)
for sector in sector_list:
assign_sector(sector, builder, dummy_pool, global_pole)
return builder
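# Main sector shuffle for cross-dungeon door shuffle: pin boss and entrance sectors
# first, then distribute location-bearing, crystal switch, crystal barrier,
# polarized, and neutral sectors across the dungeons, restarting the whole
# assignment (up to 10 attempts) if it becomes globally invalid.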
def create_dungeon_builders(all_sectors, connections_tuple, world, player,
dungeon_entrances=None, split_dungeon_entrances=None):
logger = logging.getLogger('')
logger.info('Shuffling Dungeon Sectors')
if dungeon_entrances is None:
dungeon_entrances = default_dungeon_entrances
if split_dungeon_entrances is None:
split_dungeon_entrances = split_region_starts
define_sector_features(all_sectors)
finished, dungeon_map, attempts = False, {}, 0
while not finished:
candidate_sectors = dict.fromkeys(all_sectors)
global_pole = GlobalPolarity(candidate_sectors)
dungeon_map = {}
for key in dungeon_regions.keys():
dungeon_map[key] = DungeonBuilder(key)
for key in dungeon_boss_sectors.keys():
current_dungeon = dungeon_map[key]
for r_name in dungeon_boss_sectors[key]:
assign_sector(find_sector(r_name, candidate_sectors), current_dungeon, candidate_sectors, global_pole)
if key == 'Hyrule Castle' and world.mode[player] == 'standard':
for r_name in ['Hyrule Dungeon Cellblock', 'Sanctuary']:
assign_sector(find_sector(r_name, candidate_sectors), current_dungeon,
candidate_sectors, global_pole)
entrances_map, potentials, connections = connections_tuple
accessible_sectors, reverse_d_map = set(), {}
for key in dungeon_entrances.keys():
current_dungeon = dungeon_map[key]
current_dungeon.all_entrances = dungeon_entrances[key]
for r_name in current_dungeon.all_entrances:
sector = find_sector(r_name, candidate_sectors)
assign_sector(sector, current_dungeon, candidate_sectors, global_pole)
if r_name in entrances_map[key]:
if sector:
accessible_sectors.add(sector)
else:
if not sector:
sector = find_sector(r_name, all_sectors)
reverse_d_map[sector] = key
if world.mode[player] == 'standard':
current_dungeon = dungeon_map['Hyrule Castle']
standard_stair_check(dungeon_map, current_dungeon, candidate_sectors, global_pole)
complete_dungeons = {x: y for x, y in dungeon_map.items() if sum(len(sector.outstanding_doors) for sector in y.sectors) <= 0}
[dungeon_map.pop(key) for key in complete_dungeons.keys()]
identify_destination_sectors(accessible_sectors, reverse_d_map, dungeon_map, connections,
dungeon_entrances, split_dungeon_entrances)
for name, builder in dungeon_map.items():
calc_allowance_and_dead_ends(builder, connections_tuple, world, player)
if world.mode[player] == 'open' and world.shuffle[player] not in ['crossed', 'insanity']:
sanc = find_sector('Sanctuary', candidate_sectors)
if sanc:
lw_builders = []
for name, portal_list in dungeon_portals.items():
for portal_name in portal_list:
if world.get_portal(portal_name, player).light_world:
lw_builders.append(dungeon_map[name])
break
sanc_builder = random.choice(lw_builders)
assign_sector(sanc, sanc_builder, candidate_sectors, global_pole)
free_location_sectors = {}
crystal_switches = {}
crystal_barriers = {}
polarized_sectors = {}
neutral_sectors = {}
for sector in candidate_sectors:
if sector.chest_locations > 0:
free_location_sectors[sector] = None
elif sector.c_switch:
crystal_switches[sector] = None
elif sector.blue_barrier:
crystal_barriers[sector] = None
elif sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
assign_location_sectors(dungeon_map, free_location_sectors, global_pole)
leftover = assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers, global_pole)
ensure_crystal_switches_reachable(dungeon_map, leftover, polarized_sectors, crystal_barriers, global_pole)
for sector in leftover:
if sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole)
try:
if not global_pole.is_valid(dungeon_map):
                raise NeutralizingException('Either the free location or crystal switch assignment is already globally invalid')
logger.info(world.fish.translate("cli", "cli", "balance.doors"))
builder_info = dungeon_entrances, split_dungeon_entrances, connections_tuple, world, player
assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info)
assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info)
dungeon_map.update(complete_dungeons)
finished = True
except (NeutralizingException, GenerationException) as e:
attempts += 1
logger.debug(f'Attempt {attempts} failed with {str(e)}')
if attempts >= 10:
raise Exception('Could not find a valid seed quickly, something is likely horribly wrong.', e)
return dungeon_map
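# Standard mode: Hyrule Castle must receive at least one sector with a live spiral
# staircase door (presumably so the mandatory castle stairs can connect), chosen so
# that global polarity stays valid.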
def standard_stair_check(dungeon_map, dungeon, candidate_sectors, global_pole):
filtered_sectors = [x for x in candidate_sectors if any(y for y in x.outstanding_doors if not y.dead and y.type == DoorType.SpiralStairs)]
valid = False
while not valid:
chosen_sector = random.choice(filtered_sectors)
filtered_sectors.remove(chosen_sector)
valid = global_pole.is_valid_choice(dungeon_map, dungeon, [chosen_sector])
if valid:
assign_sector(chosen_sector, dungeon, candidate_sectors, global_pole)
def identify_destination_sectors(accessible_sectors, reverse_d_map, dungeon_map, connections, dungeon_entrances, split_dungeon_entrances):
accessible_overworld, found_connections, explored = set(), set(), False
while not explored:
explored = True
for ent_name, region in connections.items():
if ent_name in found_connections:
continue
sector = find_sector(ent_name, reverse_d_map.keys())
if sector in accessible_sectors:
found_connections.add(ent_name)
accessible_overworld.add(region)
explored = False
elif region in accessible_overworld:
found_connections.add(ent_name)
accessible_sectors.add(sector)
explored = False
else:
d_name = reverse_d_map[sector]
if d_name not in split_dungeon_entrances:
for r_name in dungeon_entrances[d_name]:
ent_sector = find_sector(r_name, dungeon_map[d_name].sectors)
if ent_sector in accessible_sectors and ent_name not in dead_entrances:
sector.destination_entrance = True
found_connections.add(ent_name)
accessible_sectors.add(sector)
accessible_overworld.add(region)
explored = False
break
elif d_name in split_dungeon_entrances.keys():
split_section = None
for split_name, split_list in split_dungeon_entrances[d_name].items():
if ent_name in split_list:
split_section = split_name
break
if split_section:
for r_name in split_dungeon_entrances[d_name][split_section]:
ent_sector = find_sector(r_name, dungeon_map[d_name].sectors)
if ent_sector in accessible_sectors and ent_name not in dead_entrances:
sector.destination_entrance = True
found_connections.add(ent_name)
accessible_sectors.add(sector)
accessible_overworld.add(region)
explored = False
break
# todo: split version that adds allowance for potential entrances
def calc_allowance_and_dead_ends(builder, connections_tuple, world, player):
portals = world.dungeon_portals[player]
entrances_map, potentials, connections = connections_tuple
name = builder.name if not builder.split_flag else builder.name.rsplit(' ', 1)[0]
needed_connections = [x for x in builder.all_entrances if x not in entrances_map[name]]
starting_allowance = 0
used_sectors = set()
destination_entrances = [x.door.entrance.parent_region.name for x in portals if x.destination]
dead_ends = [x.door.entrance.parent_region.name for x in portals if x.deadEnd]
for entrance in entrances_map[name]:
sector = find_sector(entrance, builder.sectors)
if sector:
outflow_target = 0 if entrance not in drop_entrances_allowance else 1
if sector not in used_sectors and (sector.adj_outflow() > outflow_target or entrance in dead_ends):
if entrance not in destination_entrances:
starting_allowance += 1
else:
builder.branches -= 1
used_sectors.add(sector)
elif sector not in used_sectors:
if entrance in destination_entrances and sector.branches() > 0:
builder.branches -= 1
if entrance not in drop_entrances_allowance:
needed_connections.append(entrance)
builder.allowance = starting_allowance
for entrance in needed_connections:
sector = find_sector(entrance, builder.sectors)
if sector and sector not in used_sectors: # ignore things on same sector
is_destination = entrance in destination_entrances
connect_able = False
if entrance in connections.keys():
enabling_region = connections[entrance]
check_list = list(potentials[enabling_region])
if enabling_region.name in ['Desert Ledge', 'Desert Palace Entrance (North) Spot']:
alternate = 'Desert Palace Entrance (North) Spot' if enabling_region.name == 'Desert Ledge' else 'Desert Ledge'
if world.get_region(alternate, player) in potentials:
check_list.extend(potentials[world.get_region(alternate, player)])
connecting_entrances = [x for x in check_list if x != entrance and x not in dead_entrances and x not in drop_entrances_allowance]
connect_able = len(connecting_entrances) > 0
            if is_destination and sector.branches() == 0:
builder.dead_ends += 1
if is_destination and sector.branches() > 0:
builder.branches -= 1
if connect_able and not is_destination:
builder.allowance += 1
used_sectors.add(sector)
def define_sector_features(sectors):
for sector in sectors:
for region in sector.regions:
for loc in region.locations:
if '- Prize' in loc.name or loc.name in ['Agahnim 1', 'Agahnim 2']:
pass
elif loc.forced_item and 'Small Key' in loc.item.name:
sector.key_only_locations += 1
elif loc.forced_item and loc.forced_item.bigkey:
sector.bk_provided = True
elif loc.name not in dungeon_events and not loc.forced_item:
sector.chest_locations += 1
if '- Big Chest' in loc.name or loc.name in ["Hyrule Castle - Zelda's Chest",
"Thieves' Town - Blind's Cell"]:
sector.bk_required = True
for ext in region.exits:
door = ext.door
if door is not None:
if door.crystal == CrystalBarrier.Either:
sector.c_switch = True
elif door.crystal == CrystalBarrier.Orange:
sector.orange_barrier = True
elif door.crystal == CrystalBarrier.Blue:
sector.blue_barrier = True
if door.bigKey:
sector.bk_required = True
def assign_sector(sector, dungeon, candidate_sectors, global_pole):
if sector:
del candidate_sectors[sector]
global_pole.consume(sector)
assign_sector_helper(sector, dungeon)
def assign_sector_helper(sector, builder):
builder.sectors.append(sector)
builder.location_cnt += sector.chest_locations
builder.key_drop_cnt += sector.key_only_locations
if sector.c_switch:
builder.c_switch_present = True
if sector.blue_barrier:
builder.c_switch_required = True
if sector.bk_required:
builder.bk_required = True
if sector.bk_provided:
builder.bk_provided = True
count_conn_needed_supplied(sector, builder.conn_needed, builder.conn_supplied)
builder.dead_ends += sector.dead_ends()
builder.branches += sector.branches()
if sector in builder.exception_list:
builder.exception_list.remove(sector)
else:
if builder.split_dungeon_map:
builder.split_dungeon_map = None
if builder.valid_proposal:
builder.valid_proposal = None
def count_conn_needed_supplied(sector, conn_needed, conn_supplied):
for door in sector.outstanding_doors:
if (door.blocked or door.dead or sector.adj_outflow() <= 1) and not sector.is_entrance_sector():
conn_needed[hook_from_door(door)] += 1
else:
conn_supplied[hanger_from_door(door)] += 1
def find_sector(r_name, sectors):
for s in sectors:
if r_name in s.region_set():
return s
return None
def assign_location_sectors(dungeon_map, free_location_sectors, global_pole):
valid = False
choices = None
sector_list = list(free_location_sectors)
random.shuffle(sector_list)
while not valid:
choices, d_idx, totals = weighted_random_locations(dungeon_map, sector_list)
for i, sector in enumerate(sector_list):
choice = d_idx[choices[i].name]
totals[choice] += sector.chest_locations
valid = True
for d_name, idx in d_idx.items():
if totals[idx] < 5:
valid = False
break
for i, choice in enumerate(choices):
builder = dungeon_map[choice.name]
assign_sector(sector_list[i], builder, free_location_sectors, global_pole)
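# Weighted random assignment that biases location-bearing sectors toward dungeons
# currently below the average location count. The base weight 6.375 is a tuning
# constant, and the divisor 13 matches the number of dungeons.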
def weighted_random_locations(dungeon_map, free_location_sectors):
population = []
ttl_assigned = 0
weights = []
totals = []
d_idx = {}
for i, dungeon_builder in enumerate(dungeon_map.values()):
population.append(dungeon_builder)
totals.append(dungeon_builder.location_cnt)
ttl_assigned += dungeon_builder.location_cnt
weights.append(6.375)
d_idx[dungeon_builder.name] = i
average = ttl_assigned / 13
for i, db in enumerate(population):
if db.location_cnt < average:
weights[i] += average - db.location_cnt
if db.location_cnt > average:
weights[i] = max(0, weights[i] - db.location_cnt + average)
choices = random.choices(population, weights, k=len(free_location_sectors))
return choices, d_idx, totals
def assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers, global_pole, assign_one=False):
population = []
some_c_switches_present = False
for name, builder in dungeon_map.items():
if builder.c_switch_required and not builder.c_switch_present and not builder.c_locked:
population.append(name)
if builder.c_switch_present and not builder.c_locked:
some_c_switches_present = True
if len(population) == 0:
if assign_one and not some_c_switches_present:
if len(crystal_switches) == 0:
raise GenerationException('No crystal switches to assign. Ref %s' % next(iter(dungeon_map.keys())))
valid, builder_choice, switch_choice = False, None, None
switch_candidates = list(crystal_switches)
switch_choice = random.choice(switch_candidates)
switch_candidates.remove(switch_choice)
builder_candidates = [name for name, builder in dungeon_map.items() if not builder.c_locked]
while not valid:
if len(builder_candidates) == 0:
if len(switch_candidates) == 0:
                        raise GenerationException('Nowhere to assign crystal switch. Ref %s' % next(iter(dungeon_map.keys())))
switch_choice = random.choice(switch_candidates)
switch_candidates.remove(switch_choice)
builder_candidates = list(dungeon_map.keys())
choice = random.choice(builder_candidates)
builder_candidates.remove(choice)
builder_choice = dungeon_map[choice]
test_set = [switch_choice]
test_set.extend(crystal_barriers)
valid = global_pole.is_valid_choice(dungeon_map, builder_choice, test_set)
assign_sector(switch_choice, builder_choice, crystal_switches, global_pole)
return crystal_switches
if len(crystal_switches) == 0:
raise GenerationException('No crystal switches to assign')
sector_list = list(crystal_switches)
if len(population) > len(sector_list):
raise GenerationException('Not enough crystal switch sectors for those needed')
choices = random.sample(sector_list, k=len(population))
for i, choice in enumerate(choices):
builder = dungeon_map[population[i]]
assign_sector(choice, builder, crystal_switches, global_pole)
return crystal_switches
def ensure_crystal_switches_reachable(dungeon_map, crystal_switches, polarized_sectors, crystal_barriers, global_pole):
invalid_builders = []
for name, builder in dungeon_map.items():
if builder.c_switch_present and builder.c_switch_required and not builder.c_locked:
invalid_builders.append(builder)
while len(invalid_builders) > 0:
valid_builders = []
for builder in invalid_builders:
entrance_sectors = []
reachable_crystals = defaultdict()
for sector in builder.sectors:
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
if sector.is_entrance_sector() and not sector.destination_entrance:
need_switch = True
for region in sector.get_start_regions():
if region.crystal_switch:
need_switch = False
break
any_benefit = False
for eq in sector.equations:
if len(eq.benefit) > 0:
any_benefit = True
break
if need_switch and any_benefit:
entrance_sectors.append(sector)
for eq in sector.equations:
if eq.c_switch:
reachable_crystals[hook_from_door(eq.door)] = True
valid_ent_sectors = []
for entrance_sector in entrance_sectors:
other_sectors = [x for x in builder.sectors if x != entrance_sector]
reachable, access = is_c_switch_reachable(entrance_sector, reachable_crystals, other_sectors)
if reachable:
valid_ent_sectors.append(entrance_sector)
else:
candidates = {}
for c in find_pol_cand_for_c_switch(access, reachable_crystals, polarized_sectors):
candidates[c] = 'Polarized'
for c in find_crystal_cand(access, crystal_switches):
candidates[c] = 'Crystals'
for c in find_pol_cand_for_c_switch(access, reachable_crystals, crystal_barriers):
candidates[c] = 'Barriers'
valid, sector, which_list = False, None, None
while not valid:
if len(candidates) <= 0:
raise GenerationException(f'need to provide more sophisticated crystal connection for {entrance_sector}')
sector, which_list = random.choice(list(candidates.items()))
del candidates[sector]
valid = global_pole.is_valid_choice(dungeon_map, builder, [sector])
if which_list == 'Polarized':
assign_sector(sector, builder, polarized_sectors, global_pole)
elif which_list == 'Crystals':
assign_sector(sector, builder, crystal_switches, global_pole)
elif which_list == 'Barriers':
assign_sector(sector, builder, crystal_barriers, global_pole)
entrance_sectors = [x for x in entrance_sectors if x not in valid_ent_sectors]
if len(entrance_sectors) == 0:
valid_builders.append(builder)
invalid_builders = [x for x in invalid_builders if x not in valid_builders]
def is_c_switch_reachable(entrance_sector, reachable_crystals, other_sectors):
current_access = {}
for eq in entrance_sector.equations:
if eq.total_cost() <= 0:
for key, door_list in eq.benefit.items():
for door in door_list:
if door not in eq.crystal_blocked.keys() or eq.crystal_blocked[door] != CrystalBarrier.Blue:
current_access[key] = True
break
for key, flag in current_access.items():
if opposite_h_type(key) in reachable_crystals.keys():
return True, {}
changed = True
while changed:
changed = False
for sector in other_sectors:
for eq in sector.equations:
key, cost_door = eq.cost
if key in current_access.keys() and current_access[key]:
for bene_key, door_list in eq.benefit.items():
for door in door_list:
block_dict = eq.crystal_blocked
if door not in block_dict.keys() or block_dict[door] != CrystalBarrier.Blue:
if bene_key not in current_access.keys():
current_access[bene_key] = True
changed = True
break
for key, flag in current_access.items():
if opposite_h_type(key) in reachable_crystals.keys():
return True, {}
return False, current_access
def find_pol_cand_for_c_switch(access, reachable_crystals, polarized_candidates):
candidates = []
for sector in polarized_candidates:
if pol_cand_matches_access_reach(sector, access, reachable_crystals):
candidates.append(sector)
return candidates
def pol_cand_matches_access_reach(sector, access, reachable_crystals):
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
for eq in sector.equations:
key, cost_door = eq.cost
if key in access.keys() and access[key]:
for bene_key, door_list in eq.benefit.items():
for door in door_list:
if door not in eq.crystal_blocked.keys() or eq.crystal_blocked[door] != CrystalBarrier.Blue:
if opposite_h_type(bene_key) in reachable_crystals.keys():
return True
return False
def find_crystal_cand(access, crystal_switches):
candidates = []
for sector in crystal_switches:
if crystal_cand_matches_access(sector, access):
candidates.append(sector)
return candidates
def crystal_cand_matches_access(sector, access):
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
for eq in sector.equations:
key, cost_door = eq.cost
if key in access.keys() and access[key] and eq.c_switch and len(sector.outstanding_doors) > 1:
return True
return False
def assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole):
population = []
for name, builder in dungeon_map.items():
if builder.c_switch_present and not builder.c_locked:
population.append(name)
sector_list = list(crystal_barriers)
random.shuffle(sector_list)
choices = random.choices(population, k=len(sector_list))
for i, choice in enumerate(choices):
builder = dungeon_map[choice]
assign_sector(sector_list[i], builder, crystal_barriers, global_pole)
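# Polarity issue detection: find builders whose sectors cannot all connect because
# some door-axis magnitude appears in only one sector, or two axes never co-occur
# in any sector; mag_needed records which axes a new sector must bridge.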
def identify_polarity_issues(dungeon_map):
unconnected_builders = {}
for name, builder in dungeon_map.items():
identify_polarity_issues_internal(name, builder, unconnected_builders)
return unconnected_builders
def identify_polarity_issues_internal(name, builder, unconnected_builders):
if len(builder.sectors) == 1:
return
else:
def sector_filter(x, y):
return x != y
connection_flags = {}
for slot in PolSlot:
connection_flags[slot] = {}
for slot2 in PolSlot:
connection_flags[slot][slot2] = False
for sector in builder.sectors:
others = [x for x in builder.sectors if sector_filter(x, sector)]
other_mag = sum_magnitude(others)
sector_mag = sector.magnitude()
check_flags(sector_mag, connection_flags)
unconnected_sector = True
for i in PolSlot:
if sector_mag[i.value] == 0 or other_mag[i.value] > 0 or self_connecting(sector, i, sector_mag):
unconnected_sector = False
break
if unconnected_sector:
for i in PolSlot:
if sector_mag[i.value] > 0 and other_mag[i.value] == 0 and not self_connecting(sector, i, sector_mag):
builder.mag_needed[i] = [x for x in PolSlot if other_mag[x.value] > 0]
if name not in unconnected_builders.keys():
unconnected_builders[name] = builder
ttl_mag = sum_magnitude(builder.sectors)
for slot in PolSlot:
for slot2 in PolSlot:
if ttl_mag[slot.value] > 0 and ttl_mag[slot2.value] > 0 and not connection_flags[slot][slot2]:
builder.mag_needed[slot] = [slot2]
builder.mag_needed[slot2] = [slot]
if name not in unconnected_builders.keys():
unconnected_builders[name] = builder
def self_connecting(sector, slot, magnitude):
return sector.polarity()[slot.value] == 0 and sum(magnitude) > magnitude[slot.value]
def check_flags(sector_mag, connection_flags):
for slot in PolSlot:
for slot2 in PolSlot:
if sector_mag[slot.value] > 0 and sector_mag[slot2.value] > 0:
connection_flags[slot][slot2] = True
if slot != slot2:
for check_slot in PolSlot:
if check_slot not in [slot, slot2] and connection_flags[slot2][check_slot]:
connection_flags[slot][check_slot] = True
connection_flags[check_slot][slot] = True
def identify_simple_branching_issues(dungeon_map):
problem_builders = {}
for name, builder in dungeon_map.items():
if name == 'Skull Woods 2':
builder.conn_supplied[Hook.West] += 1
builder.conn_needed[Hook.East] -= 1
builder.forced_loops = calc_forced_loops(builder.sectors)
if builder.dead_ends + builder.forced_loops * 2 > builder.branches + builder.allowance:
problem_builders[name] = builder
for h_type in Hook:
lack = builder.conn_balance[h_type] = builder.conn_supplied[h_type] - builder.conn_needed[h_type]
if lack < 0:
builder.total_conn_lack += -lack
problem_builders[name] = builder
return problem_builders
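# Estimates loops that must exist because a sector carries more hooks of one
# type than the rest of the pool can absorb: the surplus can only pair with
# itself. Each forced loop is counted as two dead ends against the branch
# budget elsewhere (hence the forced_loops * 2 in the checks above).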
def calc_forced_loops(sector_list):
forced_loops = 0
for sector in sector_list:
h_mag = sector.hook_magnitude()
other_sectors = [x for x in sector_list if x != sector]
other_mag = sum_hook_magnitude(other_sectors)
loop_parts = 0
for hook in Hook:
opp = opposite_h_type(hook).value
if h_mag[hook.value] > other_mag[opp] and loop_present(hook, opp, h_mag, other_mag):
loop_parts += (h_mag[hook.value] - other_mag[opp]) / 2
forced_loops += math.floor(loop_parts)
return forced_loops
def loop_present(hook, opp, h_mag, other_mag):
if hook == Hook.Stairs:
return h_mag[hook.value] - other_mag[opp] >= 2
else:
return h_mag[opp] >= h_mag[hook.value] - other_mag[opp]
def is_satisfied(door_dict_list):
for door_dict in door_dict_list:
for door_list in door_dict.values():
if len(door_list) > 0:
return False
return True
def filter_match_deps(candidate, match_deps):
return [x for x in match_deps if x != candidate]
def sum_magnitude(sector_list):
result = [0] * len(PolSlot)
for sector in sector_list:
vector = sector.magnitude()
for i in range(len(result)):
result[i] = result[i] + vector[i]
return result
def sum_hook_magnitude(sector_list):
result = [0] * len(Hook)
for sector in sector_list:
vector = sector.hook_magnitude()
for i in range(len(result)):
result[i] = result[i] + vector[i]
return result
def sum_polarity(sector_list):
pol = Polarity()
for sector in sector_list:
pol += sector.polarity()
return pol
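# Assigns the polarized sectors in phases: first patch connectedness problems,
# then simple branch shortages, then parity and full neutralization via
# polarity_step_3, and finally distribute the leftover neutral groupings.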
def assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info):
unconnected_builders = identify_polarity_issues(dungeon_map)
while len(unconnected_builders) > 0:
for name, builder in unconnected_builders.items():
candidates = find_connection_candidates(builder.mag_needed, polarized_sectors)
valid, sector = False, None
while not valid:
if len(candidates) == 0:
raise GenerationException('Cross Dungeon Builder: Cannot find a candidate for connectedness. %s' % name)
sector = random.choice(candidates)
candidates.remove(sector)
valid = global_pole.is_valid_choice(dungeon_map, builder, [sector])
assign_sector(sector, builder, polarized_sectors, global_pole)
builder.mag_needed = {}
unconnected_builders = identify_polarity_issues(unconnected_builders)
problem_builders = identify_simple_branching_issues(dungeon_map)
while len(problem_builders) > 0:
for name, builder in problem_builders.items():
candidates, charges = find_simple_branching_candidates(builder, polarized_sectors)
best = min(charges)
best_candidates = [x for i, x in enumerate(candidates) if charges[i] <= best]
valid, choice = False, None
while not valid:
if len(best_candidates) == 0:
if len(candidates) == 0:
raise GenerationException('Cross Dungeon Builder: Simple branch problems: %s' % name)
best = min(charges)
best_candidates = [x for i, x in enumerate(candidates) if charges[i] <= best]
choice = random.choice(best_candidates)
best_candidates.remove(choice)
i = candidates.index(choice)
candidates.pop(i)
charges.pop(i)
valid = global_pole.is_valid_choice(dungeon_map, builder, [choice]) and valid_connected_assignment(builder, [choice])
assign_sector(choice, builder, polarized_sectors, global_pole)
builder.total_conn_lack = 0
builder.conn_balance.clear()
problem_builders = identify_simple_branching_issues(problem_builders)
polarity_step_3(dungeon_map, polarized_sectors, global_pole)
neutral_choices: List[List] = neutralize_the_rest(polarized_sectors)
problem_builders = identify_branching_issues(dungeon_map, builder_info)
while len(problem_builders) > 0:
for name, builder in problem_builders.items():
candidates = find_branching_candidates(builder, neutral_choices, builder_info)
valid, choice = False, None
while not valid:
if len(candidates) <= 0:
raise GenerationException('Cross Dungeon Builder: Complex branch problems: %s' % name)
choice = random.choice(candidates)
candidates.remove(choice)
valid = global_pole.is_valid_choice(dungeon_map, builder, choice) and valid_polarized_assignment(builder, choice)
neutral_choices.remove(choice)
for sector in choice:
assign_sector(sector, builder, polarized_sectors, global_pole)
builder.unfulfilled.clear()
problem_builders = identify_branching_issues(problem_builders, builder_info)
comb_w_replace = len(dungeon_map) ** len(neutral_choices)
combinations = None
if comb_w_replace <= 1000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(neutral_choices)))
random.shuffle(combinations)
tries = 0
while len(polarized_sectors) > 0:
if tries > 1000 or (combinations and tries >= len(combinations)):
raise GenerationException('No valid assignment found. Ref: %s' % next(iter(dungeon_map.keys())))
if combinations:
choices = combinations[tries]
else:
choices = random.choices(list(dungeon_map.keys()), k=len(neutral_choices))
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].extend(neutral_choices[i])
all_valid = True
for name, sector_list in chosen_sectors.items():
if not valid_assignment(dungeon_map[name], sector_list, builder_info):
all_valid = False
break
if all_valid:
for i, choice in enumerate(choices):
builder = dungeon_map[choice]
for sector in neutral_choices[i]:
assign_sector(sector, builder, polarized_sectors, global_pole)
tries += 1
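# Every builder needs an even polarity charge before it can be neutralized, so
# each odd-charged builder receives one odd-charged grouping, sampled so the
# total remaining charge across the dungeon map stays as low as possible.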
def polarity_step_3(dungeon_map, polarized_sectors, global_pole):
odd_builders = [x for x in dungeon_map.values() if sum_polarity(x.sectors).charge() % 2 != 0]
grouped_choices: List[List] = find_forced_groupings(polarized_sectors, dungeon_map)
random.shuffle(odd_builders)
odd_candidates = find_odd_sectors(grouped_choices)
tries = 0
while len(odd_builders) > 0:
if tries > 1000:
raise GenerationException('Unable to fix dungeon parity. Ref: %s' % next(iter(odd_builders)).name)
best_choices = None
best_charge = sum([x.polarity().charge() for x in dungeon_map.values()])
samples = 0
combos = ncr(len(odd_candidates), len(odd_builders))
sample_target = 100 if combos > 10 else combos * 2
while best_choices is None or samples < sample_target:
samples += 1
if len(odd_candidates) < len(odd_builders):
raise GenerationException(f'Unable to fix dungeon parity - not enough candidates.'
f' Ref: {next(iter(odd_builders)).name}')
choices = random.sample(odd_candidates, k=len(odd_builders))
valid = global_pole.is_valid_multi_choice(dungeon_map, odd_builders, choices)
charge = calc_total_charge(dungeon_map, odd_builders, choices)
if valid and charge < best_charge:
best_choices = choices
best_charge = charge
if samples > sample_target and best_choices is None:
best_choices = choices
best_charge = charge
samples = 0
all_valid = True
for i, candidate_list in enumerate(best_choices):
test_set = find_forced_connections(dungeon_map, candidate_list, polarized_sectors)
builder = odd_builders[i]
if ensure_test_set_connectedness(test_set, builder, polarized_sectors, dungeon_map, global_pole):
all_valid &= valid_branch_only(builder, candidate_list)
else:
all_valid = False
break
if not all_valid:
break
if all_valid:
for i, candidate_list in enumerate(best_choices):
builder = odd_builders[i]
for sector in candidate_list:
assign_sector(sector, builder, polarized_sectors, global_pole)
odd_builders = [x for x in dungeon_map.values() if sum_polarity(x.sectors).charge() % 2 != 0]
else:
tries += 1
parallel_full_neutralization(dungeon_map, polarized_sectors, global_pole)
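# Neutralizes every builder at once: grows a depth-indexed database of sector
# combinations keyed by polarity and, at each depth, looks up the exact
# complement of each unbalanced builder, committing any globally valid find.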
def parallel_full_neutralization(dungeon_map, polarized_sectors, global_pole):
start = time.process_time()
builders = list(dungeon_map.values())
finished = all([x.polarity().is_neutral() for x in builders])
solution_list, current_depth = defaultdict(list), 1
complete_builders = [x for x in builders if x.polarity().is_neutral()]
avail_sectors = list(polarized_sectors)
while not finished:
builders_to_check = [x for x in builders if not (x.polarity()+sum_polarity(solution_list[x])).is_neutral()]
candidates, last_depth = find_exact_neutralizing_candidates_parallel_db(builders_to_check, solution_list,
avail_sectors, current_depth)
increment_depth = True
any_valid = False
for builder, candidate_list in candidates.items():
valid, sectors = False, None
while not valid:
if len(candidate_list) == 0:
increment_depth = False
break
sectors = random.choice(candidate_list)
candidate_list.remove(sectors)
proposal = solution_list.copy()
proposal[builder] = list(proposal[builder])
proposal[builder].extend(sectors)
valid = global_pole.is_valid_multi_choice_2(dungeon_map, builders, proposal)
if valid:
any_valid = True
solution_list[builder].extend(sectors)
for sector in sectors:
avail_sectors.remove(sector)
complete_builders.append(builder)
for other_builder, other_cand_list in candidates.items():
if other_builder not in complete_builders:
candidates_to_remove = list()
for candidate in other_cand_list:
for sector in sectors:
if sector in candidate:
candidates_to_remove.append(candidate)
break
other_cand_list[:] = [x for x in other_cand_list if x not in candidates_to_remove]
if not any_valid:
increment_depth = True
current_depth = last_depth + 1 if increment_depth else last_depth
finished = all([(x.polarity()+sum_polarity(solution_list[x])).is_neutral() for x in builders])
    logging.getLogger('').info(f'-Balanced solution found in {time.process_time()-start:.3f} seconds')
for builder, sectors in solution_list.items():
for sector in sectors:
assign_sector(sector, builder, polarized_sectors, global_pole)
def find_forced_connections(dungeon_map, candidate_list, polarized_sectors):
test_set = list(candidate_list)
other_sectors = [x for x in polarized_sectors if x not in candidate_list]
dungeon_hooks = defaultdict(int)
for name, builder in dungeon_map.items():
d_mag = sum_hook_magnitude(builder.sectors)
for val in Hook:
dungeon_hooks[val] += d_mag[val.value]
queue = deque(candidate_list)
while queue:
candidate = queue.popleft()
c_mag = candidate.hook_magnitude()
other_candidates = [x for x in candidate_list if x != candidate]
for val in Hook:
if c_mag[val.value] > 0:
opp = opposite_h_type(val)
o_val = opp.value
if sum_hook_magnitude(other_candidates)[o_val] == 0 and dungeon_hooks[opp] == 0 and not valid_self(c_mag, val, opp):
forced_sector = []
for sec in other_sectors:
if sec.hook_magnitude()[o_val] > 0:
forced_sector.append(sec)
if len(forced_sector) > 1:
break
if len(forced_sector) == 1:
test_set.append(forced_sector[0])
return test_set
def valid_self(c_mag, val, opp):
if val == Hook.Stairs:
return c_mag[val.value] > 2
else:
return c_mag[opp.value] > 0 and sum(c_mag) > 2
def ensure_test_set_connectedness(test_set, builder, polarized_sectors, dungeon_map, global_pole):
test_copy = list(test_set)
while not valid_connected_assignment(builder, test_copy):
dummy_builder = DungeonBuilder("Dummy Builder for " + builder.name)
dummy_builder.sectors = builder.sectors + test_copy
possibles = [x for x in polarized_sectors if x not in test_copy]
candidates = find_connected_candidates(possibles)
valid, sector = False, None
while not valid:
if len(candidates) == 0:
return False
sector = random.choice(candidates)
candidates.remove(sector)
t2 = test_copy+[sector]
valid = global_pole.is_valid_choice(dungeon_map, builder, t2) and valid_branch_only(builder, t2)
test_copy.append(sector)
dummy_builder.sectors = builder.sectors + test_copy
test_set[:] = test_copy
return True
def calc_total_charge(dungeon_map, builders, sector_lists):
polarity_list = [x.polarity() for x in dungeon_map.values() if x not in builders]
for i, sectors in enumerate(sector_lists):
builder = builders[i]
polarity = builder.polarity() + sum_polarity(sectors)
polarity_list.append(polarity)
return sum([x.charge() for x in polarity_list])
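# Tracks how much positive and negative polarity per axis, and how many odd-
# and even-charged sectors, remain unassigned, so a proposed assignment can be
# rejected early if the leftover pool could never balance the remaining maps.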
class GlobalPolarity:
def __init__(self, candidate_sectors):
self.positives = [0, 0, 0]
self.negatives = [0, 0, 0]
self.evens = 0
self.odds = 0
for sector in candidate_sectors:
pol = sector.polarity()
if pol.charge() % 2 == 0:
self.evens += 1
else:
self.odds += 1
for slot in PolSlot:
if pol.vector[slot.value] < 0:
self.negatives[slot.value] += -pol.vector[slot.value]
elif pol.vector[slot.value] > 0:
self.positives[slot.value] += pol.vector[slot.value]
def copy(self):
gp = GlobalPolarity([])
gp.positives = self.positives.copy()
gp.negatives = self.negatives.copy()
gp.evens = self.evens
gp.odds = self.odds
return gp
def is_valid(self, dungeon_map):
polarities = [x.polarity() for x in dungeon_map.values()]
return self._check_parity(polarities) and self._is_valid_polarities(polarities)
def _check_parity(self, polarities):
local_evens = 0
local_odds = 0
for pol in polarities:
if pol.charge() % 2 == 0:
local_evens += 1
else:
local_odds += 1
if local_odds > self.odds:
return False
return True
def _is_valid_polarities(self, polarities):
positives = self.positives.copy()
negatives = self.negatives.copy()
for polarity in polarities:
for slot in PolSlot:
if polarity[slot.value] > 0 and slot != PolSlot.Stairs:
if negatives[slot.value] >= polarity[slot.value]:
negatives[slot.value] -= polarity[slot.value]
else:
return False
elif polarity[slot.value] < 0 and slot != PolSlot.Stairs:
if positives[slot.value] >= -polarity[slot.value]:
positives[slot.value] += polarity[slot.value]
else:
return False
elif slot == PolSlot.Stairs:
if positives[slot.value] >= polarity[slot.value]:
positives[slot.value] -= polarity[slot.value]
else:
return False
return True
def consume(self, sector):
polarity = sector.polarity()
if polarity.charge() % 2 == 0:
self.evens -= 1
else:
self.odds -= 1
for slot in PolSlot:
if polarity[slot.value] > 0 and slot != PolSlot.Stairs:
if self.positives[slot.value] >= polarity[slot.value]:
self.positives[slot.value] -= polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
elif polarity[slot.value] < 0 and slot != PolSlot.Stairs:
if self.negatives[slot.value] >= -polarity[slot.value]:
self.negatives[slot.value] += polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
elif slot == PolSlot.Stairs:
if self.positives[slot.value] >= polarity[slot.value]:
self.positives[slot.value] -= polarity[slot.value]
else:
raise GenerationException('Invalid assignment of %s' % sector.name)
def is_valid_choice(self, dungeon_map, builder, sectors):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral() and x != builder]
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
def is_valid_multi_choice(self, dungeon_map, builders, sector_lists):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral()
and x not in builders]
for i, sectors in enumerate(sector_lists):
builder = builders[i]
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
def is_valid_multi_choice_2(self, dungeon_map, builders, sector_dict):
proposal = self.copy()
non_neutral_polarities = [x.polarity() for x in dungeon_map.values() if not x.polarity().is_neutral()
and x not in builders]
for builder, sectors in sector_dict.items():
current_polarity = builder.polarity() + sum_polarity(sectors)
non_neutral_polarities.append(current_polarity)
for sector in sectors:
proposal.consume(sector)
return proposal._check_parity(non_neutral_polarities) and proposal._is_valid_polarities(non_neutral_polarities)
def find_connection_candidates(mag_needed, sector_pool):
candidates = []
for sector in sector_pool:
if sector.branching_factor() < 2:
continue
mag = sector.magnitude()
matches = False
for slot, match_slot in mag_needed.items():
if mag[slot.value] > 0:
for i in PolSlot:
if i in match_slot and mag[i.value] > 0:
matches = True
break
if matches:
candidates.append(sector)
return candidates
def find_simple_branching_candidates(builder, sector_pool):
candidates = defaultdict(list)
charges = defaultdict(list)
outflow_needed = builder.dead_ends + builder.forced_loops * 2 > builder.branches + builder.allowance
total_needed = builder.dead_ends + builder.forced_loops * 2 - builder.branches + builder.allowance
original_lack = builder.total_conn_lack
best_lack = original_lack
for sector in sector_pool:
if outflow_needed and sector.branching_factor() <= 2:
continue
calc_sector_balance(sector)
ttl_lack = 0
for hook in Hook:
lack = builder.conn_balance[hook] + sector.conn_balance[hook]
if lack < 0:
ttl_lack += -lack
forced_loops = calc_forced_loops(builder.sectors + [sector])
net_outflow = builder.dead_ends + forced_loops * 2 + sector.dead_ends() - builder.branches - builder.allowance - sector.branches()
valid_branches = net_outflow < total_needed
if valid_branches and (ttl_lack < original_lack or original_lack >= 0):
candidates[ttl_lack].append(sector)
charges[ttl_lack].append((builder.polarity()+sector.polarity()).charge())
if ttl_lack < best_lack:
best_lack = ttl_lack
if best_lack == original_lack and not outflow_needed:
        raise GenerationException('These candidates cannot improve the connection balance at all')
if len(candidates[best_lack]) <= 0:
raise GenerationException('Nothing can fix the simple branching issue. Panic ensues.')
return candidates[best_lack], charges[best_lack]
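# A sector's conn_balance tallies, per hook type, connections supplied minus
# connections demanded: blocked or dead doors (or any door on a non-branching
# sector) subtract under their hook type; the rest add under their hanger type.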
def calc_sector_balance(sector):
if sector.conn_balance is None:
sector.conn_balance = defaultdict(int)
for door in sector.outstanding_doors:
if door.blocked or door.dead or sector.branching_factor() <= 1:
sector.conn_balance[hook_from_door(door)] -= 1
else:
sector.conn_balance[hanger_from_door(door)] += 1
def find_odd_sectors(grouped_candidates):
return [x for x in grouped_candidates if sum_polarity(x).charge() % 2 != 0]
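# Driver for the neutralizing search: deepens the combination database one
# sector at a time until at least one builder finds an exact polarity
# complement, then weeds each candidate list for branching viability.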
def find_exact_neutralizing_candidates_parallel_db(builders, proposal, avail_sectors, current_depth):
candidate_map = defaultdict(list)
polarity_map = {}
for builder in builders:
polarity_map[builder] = builder.polarity() + sum_polarity(proposal[builder])
finished = False
db, index = create_db_for_depth(current_depth, avail_sectors)
while not finished:
depth_map = db[current_depth]
for builder in builders:
target = polarity_map[builder].complement()
if target in depth_map.keys():
finished = True
candidate_map[builder].extend(depth_map[target].keys())
if finished:
for builder in list(candidate_map.keys()):
try:
candidate_map[builder] = weed_candidates(builder, {0: candidate_map[builder]}, 0)
except NeutralizingException:
del candidate_map[builder]
if len(candidate_map) == 0:
finished = False
if not finished:
current_depth += 1
add_depth_to_db(db, index, current_depth, avail_sectors)
return candidate_map, current_depth
def create_db_for_depth(depth, avail_sectors):
db = {0: {Polarity(): {OrderedFrozenSet(): None}}}
db_index = {Polarity()}
for i in range(1, depth+1):
add_depth_to_db(db, db_index, i, avail_sectors)
return db, db_index
def add_depth_to_db(db, db_index, i, avail_sectors):
previous = db[i-1]
depth_map = defaultdict(dict)
index_additions = set()
for sector in avail_sectors:
sector_set = {sector}
sector_pol = sector.polarity()
for polarity, choices in previous.items():
combo_pol = sector_pol + polarity
if combo_pol not in db_index:
index_additions.add(combo_pol)
for choice in choices:
if sector in choice.frozen_set:
continue
new_set = choice.new_with_element(sector_set)
depth_map[combo_pol][new_set] = None
for addition in index_additions:
if len(depth_map[addition]) > 0:
db_index.add(addition)
else:
del depth_map[addition]
if len(depth_map) == 0:
raise NeutralizingException('There is not a solution for this particular combination. Crystal switch issue?')
db[i] = depth_map
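# Hashable set wrapper used as a database key: equality and hashing come from
# the frozenset while iteration preserves insertion order, keeping candidate
# sector lists deterministic. Minimal usage sketch (sector_a and sector_b are
# hypothetical stand-ins):
#   s = OrderedFrozenSet().new_with_element({sector_a})
#   s = s.new_with_element({sector_b})
#   list(s)  # -> [sector_a, sector_b]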
class OrderedFrozenSet:
def __init__(self):
self.frozen_set = frozenset()
self.order = []
def __eq__(self, other):
return self.frozen_set == other.frozen_set
def __hash__(self):
return hash(self.frozen_set)
def __iter__(self):
return self.order.__iter__()
def __len__(self):
return len(self.order)
def new_with_element(self, elements):
ret = OrderedFrozenSet()
ret.frozen_set = frozenset(self.frozen_set | elements)
ret.order = list(self.order)
ret.order.extend(elements)
return ret
def weed_candidates(builder, candidates, best_charge):
official_cand = []
while len(official_cand) == 0:
if len(candidates.keys()) == 0:
raise NeutralizingException('Cross Dungeon Builder: Weeded out all candidates %s' % builder.name)
while best_charge not in candidates.keys():
best_charge += 1
candidate_list = candidates.pop(best_charge)
best_lack = None
for cand in candidate_list:
ttl_deads = 0
ttl_branches = 0
for sector in cand:
calc_sector_balance(sector)
ttl_deads += sector.dead_ends()
ttl_branches += sector.branches()
ttl_lack = 0
ttl_balance = 0
for hook in Hook:
bal = 0
for sector in cand:
bal += sector.conn_balance[hook]
lack = builder.conn_balance[hook] + bal
ttl_balance += lack
if lack < 0:
ttl_lack += -lack
forced_loops = calc_forced_loops(builder.sectors + list(cand))
if ttl_balance >= 0 and builder.dead_ends + ttl_deads + forced_loops * 2 <= builder.branches + ttl_branches + builder.allowance:
if best_lack is None or ttl_lack < best_lack:
best_lack = ttl_lack
official_cand = [cand]
elif ttl_lack == best_lack:
official_cand.append(cand)
    # choose from among those that use the fewest sectors
best_len = None
cand_len = []
for cand in official_cand:
size = len(cand)
if best_len is None or size < best_len:
best_len = size
cand_len = [cand]
elif size == best_len:
cand_len.append(cand)
return cand_len
def find_branching_candidates(builder, neutral_choices, builder_info):
candidates = []
for choice in neutral_choices:
resolved, problem_list = check_for_valid_layout(builder, choice, builder_info)
if resolved:
candidates.append(choice)
return candidates
def find_connected_candidates(sector_pool):
candidates = []
for sector in sector_pool:
if sector.adj_outflow() >= 2:
candidates.append(sector)
return candidates
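# Pairs each remaining polarized sector with a combination of r others whose
# polarities sum to neutral; r grows only once the current size is exhausted,
# and kth_combination enumerates the candidate subsets lazily by index.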
def neutralize_the_rest(sector_pool):
neutral_choices = []
main_pool = list(sector_pool)
failed_pool = []
r_size = 1
while len(main_pool) > 0 or len(failed_pool) > 0:
if len(main_pool) <= r_size:
main_pool.extend(failed_pool)
failed_pool.clear()
r_size += 1
candidate = random.choice(main_pool)
main_pool.remove(candidate)
if r_size > len(main_pool):
raise GenerationException("Cross Dungeon Builder: no more neutral pairings possible")
combinations = ncr(len(main_pool), r_size)
itr = 0
done = False
while not done:
ttl_polarity = candidate.polarity()
choice_set = kth_combination(itr, main_pool, r_size)
for choice in choice_set:
ttl_polarity += choice.polarity()
if ttl_polarity.is_neutral():
choice_set.append(candidate)
neutral_choices.append(choice_set)
main_pool = [x for x in main_pool if x not in choice_set]
failed_pool = [x for x in failed_pool if x not in choice_set]
done = True
else:
itr += 1
if itr >= combinations:
failed_pool.append(candidate)
done = True
return neutral_choices
# does not force a grouping when everything in the found_list comes from the same sector
def find_forced_groupings(sector_pool, dungeon_map):
dungeon_hooks = {}
for name, builder in dungeon_map.items():
dungeon_hooks[name] = categorize_groupings(builder.sectors)
groupings = []
queue = deque(sector_pool)
skips = set()
while len(queue) > 0:
grouping = queue.popleft()
        is_list = isinstance(grouping, list)
if not is_list and grouping in skips:
continue
grouping = grouping if is_list else [grouping]
hook_categories = categorize_groupings(grouping)
force_found = False
for val in Hook:
if val in hook_categories.keys():
required_doors, flexible_doors = hook_categories[val]
if len(required_doors) >= 1:
opp = opposite_h_type(val)
found_list = []
if opp in hook_categories.keys() and len(hook_categories[opp][1]) > 0:
found_list.extend(hook_categories[opp][1])
for name, hooks in dungeon_hooks.items():
if opp in hooks.keys() and len(hooks[opp][1]) > 0:
found_list.extend(hooks[opp][1])
other_sectors = [x for x in sector_pool if x not in grouping]
other_sector_cats = categorize_groupings(other_sectors)
if opp in other_sector_cats.keys() and len(other_sector_cats[opp][1]) > 0:
found_list.extend(other_sector_cats[opp][1])
if len(required_doors) == len(found_list):
forced_sectors = []
for sec in other_sectors:
cats = categorize_groupings([sec])
if opp in cats.keys() and len(cats[opp][1]) > 0:
forced_sectors.append(sec)
if len(forced_sectors) > 0:
grouping.extend(forced_sectors)
skips.update(forced_sectors)
merge_groups = []
for group in groupings:
for sector in group:
if sector in forced_sectors:
merge_groups.append(group)
for merge in merge_groups:
grouping = list(set(grouping).union(set(merge)))
groupings.remove(merge)
queue.append(grouping)
force_found = True
elif len(flexible_doors) == 1:
opp = opposite_h_type(val)
found_list = []
if opp in hook_categories.keys() and (len(hook_categories[opp][0]) > 0 or len(hook_categories[opp][1]) > 0):
found_list.extend(hook_categories[opp][0])
found_list.extend([x for x in hook_categories[opp][1] if x not in flexible_doors])
for name, hooks in dungeon_hooks.items():
if opp in hooks.keys() and (len(hooks[opp][0]) > 0 or len(hooks[opp][1]) > 0):
found_list.extend(hooks[opp][0])
found_list.extend(hooks[opp][1])
other_sectors = [x for x in sector_pool if x not in grouping]
other_sector_cats = categorize_groupings(other_sectors)
if opp in other_sector_cats.keys() and (len(other_sector_cats[opp][0]) > 0 or len(other_sector_cats[opp][1]) > 0):
found_list.extend(other_sector_cats[opp][0])
found_list.extend(other_sector_cats[opp][1])
if len(found_list) == 1:
forced_sectors = []
for sec in other_sectors:
cats = categorize_groupings([sec])
if opp in cats.keys() and (len(cats[opp][0]) > 0 or len(cats[opp][1]) > 0):
forced_sectors.append(sec)
if len(forced_sectors) > 0:
grouping.extend(forced_sectors)
skips.update(forced_sectors)
merge_groups = []
for group in groupings:
for sector in group:
if sector in forced_sectors:
merge_groups.append(group)
for merge in merge_groups:
grouping += merge
groupings.remove(merge)
queue.append(grouping)
force_found = True
if force_found:
break
if not force_found:
groupings.append(grouping)
return groupings
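# Buckets a sector group's outstanding doors by hook type into a pair of
# lists: index 0 holds required doors (blocked or dead), index 1 holds
# flexible doors that may pair freely with any opposite hook.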
def categorize_groupings(sectors):
hook_categories = {}
for sector in sectors:
for door in sector.outstanding_doors:
hook = hook_from_door(door)
if hook not in hook_categories.keys():
hook_categories[hook] = ([], [])
if door.blocked or door.dead:
hook_categories[hook][0].append(door)
else:
hook_categories[hook][1].append(door)
return hook_categories
def valid_assignment(builder, sector_list, builder_info):
if not valid_entrance(builder, sector_list, builder_info):
return False
if not valid_c_switch(builder, sector_list):
return False
if not valid_polarized_assignment(builder, sector_list):
return False
resolved, problems = check_for_valid_layout(builder, sector_list, builder_info)
return resolved
def valid_entrance(builder, sector_list, builder_info):
is_dead_end = False
if len(builder.sectors) == 0:
is_dead_end = True
else:
entrances, splits, c_tuple, world, player = builder_info
if builder.name not in entrances.keys():
name_parts = builder.name.rsplit(' ', 1)
entrance_list = splits[name_parts[0]][name_parts[1]]
entrances = []
for sector in builder.sectors:
if sector.is_entrance_sector():
sector.region_set()
entrances.append(sector)
all_dead = True
for sector in entrances:
for region in entrance_list:
if region in sector.region_set():
portal = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region.name == region), None)
if portal and not portal.deadEnd:
all_dead = False
break
if not all_dead:
break
is_dead_end = all_dead
return len(sector_list) == 0 if is_dead_end else True
def valid_c_switch(builder, sector_list):
if builder.c_switch_present:
return True
for sector in sector_list:
if sector.c_switch:
return True
if builder.c_switch_required:
return False
for sector in sector_list:
if sector.blue_barrier:
return False
return True
def valid_connected_assignment(builder, sector_list):
full_list = sector_list + builder.sectors
if len(full_list) == 1 and sum_magnitude(full_list) == [0, 0, 0]:
return True
for sector in full_list:
if sector.is_entrance_sector():
continue
others = [x for x in full_list if x != sector]
other_mag = sum_magnitude(others)
sector_mag = sector.magnitude()
hookable = False
for i in range(len(sector_mag)):
if sector_mag[i] > 0 and other_mag[i] > 0:
hookable = True
if not hookable:
return False
return True
def valid_branch_assignment(builder, sector_list):
if not valid_connected_assignment(builder, sector_list):
return False
return valid_branch_only(builder, sector_list)
def valid_branch_only(builder, sector_list):
forced_loops = calc_forced_loops(builder.sectors + sector_list)
ttl_deads = 0
ttl_branches = 0
for s in sector_list:
        ttl_deads += s.dead_ends()
        ttl_branches += s.branches()
return builder.dead_ends + ttl_deads + forced_loops * 2 <= builder.branches + ttl_branches + builder.allowance
def valid_polarized_assignment(builder, sector_list):
if not valid_branch_assignment(builder, sector_list):
return False
return (sum_polarity(sector_list) + sum_polarity(builder.sectors)).is_neutral()
def assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info):
comb_w_replace = len(dungeon_map) ** len(neutral_sectors)
combinations = None
if comb_w_replace <= 1000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(neutral_sectors)))
random.shuffle(combinations)
tries = 0
while len(neutral_sectors) > 0:
if tries > 1000 or (combinations and tries >= len(combinations)):
raise GenerationException('No valid assignment found for "neutral" sectors. Ref: %s' % next(iter(dungeon_map.keys())))
if combinations:
choices = combinations[tries]
else:
choices = random.choices(list(dungeon_map.keys()), k=len(neutral_sectors))
neutral_sector_list = list(neutral_sectors)
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].append(neutral_sector_list[i])
all_valid = True
for name, sector_list in chosen_sectors.items():
if not valid_assignment(dungeon_map[name], sector_list, builder_info):
all_valid = False
break
if all_valid:
for name, sector_list in chosen_sectors.items():
builder = dungeon_map[name]
for sector in sector_list:
assign_sector(sector, builder, neutral_sectors, global_pole)
tries += 1
def split_dungeon_builder(builder, split_list, builder_info):
if builder.split_dungeon_map and len(builder.exception_list) == 0:
for name, proposal in builder.valid_proposal.items():
builder.split_dungeon_map[name].valid_proposal = proposal
return builder.split_dungeon_map
attempts, comb_w_replace, merge_attempt, merge_limit = 0, None, 0, len(split_list) - 1
while attempts < 5:
try:
candidate_sectors = dict.fromkeys(builder.sectors)
global_pole = GlobalPolarity(candidate_sectors)
dungeon_map, sub_builder, merge_keys = {}, None, []
if merge_attempt > 0:
candidates = []
for name, split_entrances in split_list.items():
if len(split_entrances) > 1:
candidates.append(name)
continue
elif len(split_entrances) <= 0:
continue
ents, splits, c_tuple, world, player = builder_info
r_name = split_entrances[0]
p = next((x for x in world.dungeon_portals[player] if x.door.entrance.parent_region.name == r_name), None)
if p and not p.deadEnd:
candidates.append(name)
merge_keys = random.sample(candidates, merge_attempt+1) if len(candidates) >= merge_attempt+1 else []
for name, split_entrances in split_list.items():
key = builder.name + ' ' + name
if merge_keys and name in merge_keys:
other_keys = [builder.name + ' ' + x for x in merge_keys if x != name]
other_key = next((x for x in other_keys if x in dungeon_map), None)
if other_key:
key = other_key
sub_builder = dungeon_map[other_key]
sub_builder.all_entrances.extend(split_entrances)
if key not in dungeon_map:
dungeon_map[key] = sub_builder = DungeonBuilder(key)
sub_builder.split_flag = True
sub_builder.all_entrances = list(split_entrances)
for r_name in split_entrances:
assign_sector(find_sector(r_name, candidate_sectors), sub_builder, candidate_sectors, global_pole)
comb_w_replace = len(dungeon_map) ** len(candidate_sectors)
return balance_split(candidate_sectors, dungeon_map, global_pole, builder_info)
except (GenerationException, NeutralizingException):
if comb_w_replace and comb_w_replace <= 10000:
attempts += 5
else:
attempts += 1
if attempts >= 5 and merge_attempt < merge_limit:
merge_attempt, attempts = merge_attempt + 1, 0
raise GenerationException('Unable to resolve in 5 attempts')
def balance_split(candidate_sectors, dungeon_map, global_pole, builder_info):
dungeon_entrances, split_dungeon_entrances, connections_tuple, world, player = builder_info
for name, builder in dungeon_map.items():
calc_allowance_and_dead_ends(builder, connections_tuple, world, player)
comb_w_replace = len(dungeon_map) ** len(candidate_sectors)
if comb_w_replace <= 10000:
combinations = list(itertools.product(dungeon_map.keys(), repeat=len(candidate_sectors)))
random.shuffle(combinations)
tries = 0
while tries < len(combinations):
choices = combinations[tries]
main_sector_list = list(candidate_sectors)
chosen_sectors = defaultdict(list)
for i, choice in enumerate(choices):
chosen_sectors[choice].append(main_sector_list[i])
all_valid = True
for name, builder in dungeon_map.items():
if not valid_assignment(builder, chosen_sectors[name], builder_info):
all_valid = False
break
if all_valid:
for name, sector_list in chosen_sectors.items():
builder = dungeon_map[name]
for sector in sector_list:
assign_sector(sector, builder, candidate_sectors, global_pole)
return dungeon_map
tries += 1
raise GenerationException('Split Dungeon Builder: Impossible dungeon. Ref %s' % next(iter(dungeon_map.keys())))
check_for_forced_dead_ends(dungeon_map, candidate_sectors, global_pole)
check_for_forced_assignments(dungeon_map, candidate_sectors, global_pole)
check_for_forced_crystal(dungeon_map, candidate_sectors, global_pole)
crystal_switches, crystal_barriers, neutral_sectors, polarized_sectors = categorize_sectors(candidate_sectors)
leftover = assign_crystal_switch_sectors(dungeon_map, crystal_switches, crystal_barriers,
global_pole, len(crystal_barriers) > 0)
ensure_crystal_switches_reachable(dungeon_map, leftover, polarized_sectors, crystal_barriers, global_pole)
for sector in leftover:
if sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
assign_crystal_barrier_sectors(dungeon_map, crystal_barriers, global_pole)
assign_polarized_sectors(dungeon_map, polarized_sectors, global_pole, builder_info)
assign_the_rest(dungeon_map, neutral_sectors, global_pole, builder_info)
return dungeon_map
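# If the dead-end sectors would consume every matching hook left in the pool,
# a two-door entrance sector has to take one of them directly; assign it now
# and lock the builder's crystal state so the pairing stays viable.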
def check_for_forced_dead_ends(dungeon_map, candidate_sectors, global_pole):
dead_end_sectors = [x for x in candidate_sectors if x.branching_factor() <= 1]
other_sectors = [x for x in candidate_sectors if x not in dead_end_sectors]
for name, builder in dungeon_map.items():
other_sectors += builder.sectors
other_magnitude = sum_hook_magnitude(other_sectors)
dead_cnt = [0] * len(Hook)
for sector in dead_end_sectors:
hook_mag = sector.hook_magnitude()
for hook in Hook:
if hook_mag[hook.value] != 0:
dead_cnt[hook.value] += 1
for hook in Hook:
opp = opposite_h_type(hook).value
if dead_cnt[hook.value] > other_magnitude[opp]:
raise GenerationException('Impossible to satisfy all these dead ends')
elif dead_cnt[hook.value] == other_magnitude[opp]:
candidates = [x for x in dead_end_sectors if x.hook_magnitude()[hook.value] > 0]
for sector in other_sectors:
if sector.hook_magnitude()[opp] > 0 and sector.is_entrance_sector() and sector.branching_factor() == 2:
builder = None
for b in dungeon_map.values():
if sector in b.sectors:
builder = b
break
valid, candidate_sector = False, None
while not valid:
if len(candidates) == 0:
raise GenerationException('Split Dungeon Builder: Bad dead end %s' % builder.name)
candidate_sector = random.choice(candidates)
candidates.remove(candidate_sector)
valid = global_pole.is_valid_choice(dungeon_map, builder, [candidate_sector]) and check_crystal(candidate_sector, sector)
assign_sector(candidate_sector, builder, candidate_sectors, global_pole)
builder.c_locked = True
def check_crystal(dead_end, entrance):
if dead_end.blue_barrier and not entrance.c_switch and not dead_end.c_switch:
return False
if entrance.blue_barrier and not entrance.c_switch and not dead_end.c_switch:
return False
return True
def check_for_forced_assignments(dungeon_map, candidate_sectors, global_pole):
done = False
while not done:
done = True
magnitude = sum_hook_magnitude(candidate_sectors)
dungeon_hooks = {}
for name, builder in dungeon_map.items():
dungeon_hooks[name] = sum_hook_magnitude(builder.sectors)
for val in Hook:
if magnitude[val.value] == 1:
forced_sector = None
for sec in candidate_sectors:
if sec.hook_magnitude()[val.value] > 0:
forced_sector = sec
break
opp = opposite_h_type(val).value
other_sectors = [x for x in candidate_sectors if x != forced_sector]
if sum_hook_magnitude(other_sectors)[opp] == 0:
found_hooks = []
for name, hooks in dungeon_hooks.items():
if hooks[opp] > 0 and not dungeon_map[name].c_locked:
found_hooks.append(name)
if len(found_hooks) == 1:
done = False
assign_sector(forced_sector, dungeon_map[found_hooks[0]], candidate_sectors, global_pole)
def check_for_forced_crystal(dungeon_map, candidate_sectors, global_pole):
for name, builder in dungeon_map.items():
if check_for_forced_crystal_single(builder, candidate_sectors):
builder.c_switch_required = True
def check_for_forced_crystal_single(builder, candidate_sectors):
builder_doors = defaultdict(dict)
for sector in builder.sectors:
for door in sector.outstanding_doors:
builder_doors[hook_from_door(door)][door] = sector
if len(builder_doors) == 0:
return False
candidate_doors = defaultdict(dict)
for sector in candidate_sectors:
for door in sector.outstanding_doors:
candidate_doors[hook_from_door(door)][door] = sector
for hook in builder_doors.keys():
for door in builder_doors[hook].keys():
opp = opposite_h_type(hook)
if opp in builder_doors.keys():
for d, sector in builder_doors[opp].items():
if d != door and (not sector.blue_barrier or sector.c_switch):
return False
for d, sector in candidate_doors[opp].items():
if not sector.blue_barrier or sector.c_switch:
return False
return True
def categorize_sectors(candidate_sectors):
crystal_switches = {}
crystal_barriers = {}
polarized_sectors = {}
neutral_sectors = {}
for sector in candidate_sectors:
if sector.c_switch:
crystal_switches[sector] = None
elif sector.blue_barrier:
crystal_barriers[sector] = None
elif sector.polarity().is_neutral():
neutral_sectors[sector] = None
else:
polarized_sectors[sector] = None
return crystal_switches, crystal_barriers, neutral_sectors, polarized_sectors
class NeutralizingException(Exception):
pass
class GenerationException(Exception):
pass
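# Models one outstanding door as a cost/benefit equation: 'cost' is the hook
# type (and door) that must already be accessible to traverse it, and
# 'benefit' maps hook types to the doors opened once it is traversed.
# Illustrative bookkeeping (hook names are just examples):
#   eq.cost == (Hook.North, some_door)  -> spends one unit of North access
#   eq.benefit[Hook.South] == [d1, d2]  -> grants two units of South access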
class DoorEquation:
def __init__(self, door):
self.door = door
self.cost = None, None
self.benefit = defaultdict(list)
self.required = False
self.access_id = None
self.c_switch = False
self.crystal_blocked = {}
self.entrance_flag = False
def copy(self):
eq = DoorEquation(self.door)
eq.cost = self.cost
for key, doors in self.benefit.items():
eq.benefit[key] = doors.copy()
eq.required = self.required
eq.c_switch = self.c_switch
eq.crystal_blocked = self.crystal_blocked.copy()
return eq
def total_cost(self):
return 0 if self.cost[0] is None else 1
def gross(self, current_access):
key, cost_door = self.cost
if key is None:
crystal_access = current_access.access_door[None]
else:
crystal_access = None
for match_door, crystal in current_access.outstanding_doors.items():
if hook_from_door(match_door) == key:
if crystal_access is None or current_access._better_crystal(crystal_access, crystal):
crystal_access = crystal
ttl = 0
for key, door_list in self.benefit.items():
for door in door_list:
if door in current_access.outstanding_doors.keys() or door in current_access.proposed_connections.keys():
continue
if door in self.crystal_blocked.keys() and not self.c_switch:
if crystal_access == CrystalBarrier.Either or crystal_access == self.crystal_blocked[door]:
ttl += 1
else:
ttl += 1
return ttl
def profit(self, current_access):
return self.gross(current_access) - self.total_cost()
def neutral(self):
key, door = self.cost
if key is not None and len(self.benefit[key]) <= 0:
return False
return True
def neutral_profit(self):
key, door = self.cost
if key is not None:
if len(self.benefit[key]) < 1:
return False
if len(self.benefit[key]) > 1:
return True
return False
else:
return True
def can_cover_cost(self, current_access):
key, door = self.cost
if key is not None and current_access[key] < 1:
return False
return True
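# Running state for the equation solver: per-hook access counts, the crystal
# barrier state known at each door, doors still open for matching, doors
# blocked behind a crystal requirement, and the connections proposed so far.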
class DungeonAccess:
def __init__(self):
self.access = defaultdict(int)
self.door_access = {}
self.door_sector_map = {}
self.outstanding_doors = {}
self.blocked_doors = {}
self.door_access[None] = CrystalBarrier.Orange
self.proposed_connections = {}
self.reached_doors = set()
def can_cover_equation(self, equation):
key, door = equation.cost
if key is None:
return True
return self.access[key] >= 1
def can_pay(self, key):
if key is None:
return True
return self.access[key] >= 1
def adjust_for_equation(self, equation, sector):
if equation.cost[0] is None:
original_crystal = self.door_access[None]
for key, door_list in equation.benefit.items():
self.access[key] += len(door_list)
for door in door_list:
crystal_state = CrystalBarrier.Either if equation.c_switch else original_crystal
if crystal_state == CrystalBarrier.Either:
self.door_access[None] = CrystalBarrier.Either
self.door_access[door] = crystal_state
self.door_sector_map[door] = sector
self.outstanding_doors[door] = crystal_state
self.reached_doors.add(door)
else:
key, door = equation.cost
self.access[key] -= 1
            # find a matching connection among the outstanding doors
best_door, best_crystal = None, None
for match_door, crystal in self.outstanding_doors.items():
if hook_from_door(match_door) == key:
if best_door is None or self._better_crystal(best_crystal, crystal):
best_door = match_door
best_crystal = crystal
if best_door is None:
                raise Exception('No outstanding door matches the required hook type %s' % key)
# for match_door, crystal in self.blocked_doors.items():
# if hook_from_door(match_door) == key:
# if best_door is None or self._better_crystal(best_crystal, crystal):
# best_door = match_door
# best_crystal = crystal
self.door_sector_map[door] = sector
self.door_access[door] = best_crystal
self.reached_doors.add(door)
self.proposed_connections[door] = best_door
self.proposed_connections[best_door] = door
if best_door in self.outstanding_doors.keys():
del self.outstanding_doors[best_door]
elif best_door in self.blocked_doors.keys():
del self.blocked_doors[best_door]
self.reached_doors.add(best_door)
# todo: backpropagate crystal access
if equation.c_switch or best_crystal == CrystalBarrier.Either:
# if not equation.door.blocked:
self.door_access[door] = CrystalBarrier.Either
self.door_access[best_door] = CrystalBarrier.Either
queue = deque([best_door, door])
visited = set()
while len(queue) > 0:
next_door = queue.popleft()
visited.add(next_door)
curr_sector = self.door_sector_map[next_door]
next_eq = None
for eq in curr_sector.equations:
if eq.door == next_door:
next_eq = eq
break
if next_eq.entrance_flag:
crystal_state = self.door_access[next_door]
self.door_access[None] = crystal_state
for eq in curr_sector.equations:
cand_door = eq.door
crystal_state = self.door_access[None]
if cand_door in next_eq.crystal_blocked.keys():
crystal_state = next_eq.crystal_blocked[cand_door]
if cand_door not in visited:
self.door_access[cand_door] = crystal_state
if not cand_door.blocked:
if cand_door in self.outstanding_doors.keys():
self.outstanding_doors[cand_door] = crystal_state
if cand_door in self.proposed_connections.keys():
partner_door = self.proposed_connections[cand_door]
self.door_access[partner_door] = crystal_state
if partner_door in self.outstanding_doors.keys():
self.outstanding_doors[partner_door] = crystal_state
if partner_door not in visited:
queue.append(partner_door)
else:
for key, door_list in next_eq.benefit.items():
for cand_door in door_list:
crystal_state = self.door_access[next_door]
if cand_door in next_eq.crystal_blocked.keys():
crystal_state = next_eq.crystal_blocked[cand_door]
if cand_door in self.blocked_doors.keys():
needed_crystal = self.blocked_doors[cand_door]
if meets_crystal_requirment(crystal_state, needed_crystal):
del self.blocked_doors[cand_door]
if cand_door != door:
self.access[key] += 1
self.outstanding_doors[cand_door] = crystal_state
self.door_access[cand_door] = crystal_state
self.reached_doors.add(cand_door)
if cand_door not in visited:
self.door_access[cand_door] = crystal_state
if not cand_door.blocked:
if cand_door in self.outstanding_doors.keys():
self.outstanding_doors[cand_door] = crystal_state
if cand_door in self.proposed_connections.keys():
partner_door = self.proposed_connections[cand_door]
self.door_access[partner_door] = crystal_state
if partner_door in self.outstanding_doors.keys():
self.outstanding_doors[partner_door] = crystal_state
queue.append(cand_door)
queue.append(partner_door)
for key, door_list in equation.benefit.items():
for door in door_list:
crystal_access = self.door_access[best_door]
can_access = True
if door in equation.crystal_blocked.keys():
if crystal_access == CrystalBarrier.Either or crystal_access == equation.crystal_blocked[door]:
crystal_access = equation.crystal_blocked[door]
else:
self.blocked_doors[door] = equation.crystal_blocked[door]
can_access = False
self.door_sector_map[door] = sector
if can_access and door not in self.reached_doors:
self.access[key] += 1
self.door_access[door] = crystal_access
self.outstanding_doors[door] = crystal_access
self.reached_doors.add(door)
def _better_crystal(self, current_champ, contender):
if current_champ == CrystalBarrier.Either:
return False
elif contender == CrystalBarrier.Either:
return True
elif current_champ == CrystalBarrier.Blue:
return False
elif contender == CrystalBarrier.Blue:
return True
else:
return False
def identify_branching_issues(dungeon_map, builder_info):
unconnected_builders = {}
for name, builder in dungeon_map.items():
resolved, unreached_doors = check_for_valid_layout(builder, [], builder_info)
if not resolved:
unconnected_builders[name] = builder
for hook, door_list in unreached_doors.items():
builder.unfulfilled[hook] += len(door_list)
return unconnected_builders
def check_for_valid_layout(builder, sector_list, builder_info):
dungeon_entrances, split_dungeon_entrances, c_tuple, world, player = builder_info
if builder.name in split_dungeon_entrances.keys():
try:
temp_builder = DungeonBuilder(builder.name)
for s in sector_list + builder.sectors:
assign_sector_helper(s, temp_builder)
split_list = split_dungeon_entrances[builder.name]
builder.split_dungeon_map = split_dungeon_builder(temp_builder, split_list, builder_info)
builder.valid_proposal = {}
possible_regions = set()
for portal in world.dungeon_portals[player]:
if not portal.destination and portal.name in dungeon_portals[builder.name]:
possible_regions.add(portal.door.entrance.parent_region.name)
if builder.name in dungeon_drops.keys():
possible_regions.update(dungeon_drops[builder.name])
for name, split_build in builder.split_dungeon_map.items():
name_bits = name.split(" ")
orig_name = " ".join(name_bits[:-1])
entrance_regions = split_dungeon_entrances[orig_name][name_bits[-1]]
# todo: this is hardcoded information for random entrances
for sector in split_build.sectors:
match_set = set(sector.region_set()).intersection(possible_regions)
if len(match_set) > 0:
for r_name in match_set:
if r_name not in entrance_regions:
entrance_regions.append(r_name)
# entrance_regions = [x for x in entrance_regions if x not in split_check_entrance_invalid]
proposal = generate_dungeon_find_proposal(split_build, entrance_regions, True, world, player)
# record split proposals
builder.valid_proposal[name] = proposal
builder.exception_list = list(sector_list)
return True, {}
except (GenerationException, NeutralizingException):
builder.split_dungeon_map = None
builder.valid_proposal = None
unreached_doors = resolve_equations(builder, sector_list)
return False, unreached_doors
else:
unreached_doors = resolve_equations(builder, sector_list)
return len(unreached_doors) == 0, unreached_doors
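# Greedy solver: applies zero-cost equations first, then repeatedly picks the
# best affordable equation per access region until nothing can be covered;
# doors that remain unreached are reported by hook type, including leftover
# unpaired access at the end.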
def resolve_equations(builder, sector_list):
unreached_doors = defaultdict(list)
equations = {x: y for x, y in copy_door_equations(builder, sector_list).items() if len(y) > 0}
current_access = {}
    sector_split = {}  # sectors that must belong to a particular split section
if builder.name in split_region_starts.keys():
for name, region_list in split_region_starts[builder.name].items():
current_access[name] = DungeonAccess()
for r_name in region_list:
sector = find_sector(r_name, builder.sectors)
sector_split[sector] = name
else:
current_access[builder.name] = DungeonAccess()
# resolve all that provide more access
free_sector, eq_list, free_eq = find_free_equation(equations)
while free_eq is not None:
if free_sector in sector_split.keys():
access_id = sector_split[free_sector]
access = current_access[access_id]
else:
access_id = next(iter(current_access.keys()))
access = current_access[access_id]
resolve_equation(free_eq, eq_list, free_sector, access_id, access, equations)
free_sector, eq_list, free_eq = find_free_equation(equations)
while len(equations) > 0:
valid_access = next_access(current_access)
eq, eq_list, sector, access, access_id = None, None, None, None, None
if len(valid_access) == 1:
access_id, access = valid_access[0]
eq, eq_list, sector = find_priority_equation(equations, access_id, access)
elif len(valid_access) > 1:
access_id, access = valid_access[0]
eq, eq_list, sector = find_greedy_equation(equations, access_id, access, sector_split)
if eq:
resolve_equation(eq, eq_list, sector, access_id, access, equations)
else:
for sector, eq_list in equations.items():
for eq in eq_list:
unreached_doors[hook_from_door(eq.door)].append(eq.door)
return unreached_doors
valid_access = next_access(current_access)
for access_id, dungeon_access in valid_access:
access = dungeon_access.access
access[Hook.Stairs] = access[Hook.Stairs] % 2
ns_leftover = min(access[Hook.North], access[Hook.South])
access[Hook.North] -= ns_leftover
access[Hook.South] -= ns_leftover
ew_leftover = min(access[Hook.West], access[Hook.East])
access[Hook.East] -= ew_leftover
access[Hook.West] -= ew_leftover
if sum(access.values()) > 0:
for hook, num in access.items():
for i in range(num):
unreached_doors[hook].append('placeholder')
return unreached_doors
def next_access(current_access):
valid_ones = [(x, y) for x, y in current_access.items() if sum(y.access.values()) > 0]
valid_ones.sort(key=lambda x: sum(x[1].access.values()))
return valid_ones
# Priority order for choosing the next equation:
# - equations with no change to access (check)
# - the highest-benefit equations that can be paid for (check)
# - 0-benefit required transforms
# - 0-benefit transforms (how to pick between these?)
# - negative-benefit transforms (dead ends)
def find_priority_equation(equations, access_id, current_access):
flex = calc_flex(equations, current_access)
required = calc_required(equations, current_access)
wanted_candidates = []
best_profit = None
all_candidates = []
local_profit_map = {}
for sector, eq_list in equations.items():
eq_list.sort(key=lambda eq: eq.profit(current_access), reverse=True)
best_local_profit = None
for eq in eq_list:
profit = eq.profit(current_access)
if current_access.can_cover_equation(eq) and (eq.access_id is None or eq.access_id == access_id):
# if eq.neutral_profit() or eq.neutral():
# return eq, eq_list, sector # don't need to compare - just use it now
if best_local_profit is None or profit > best_local_profit:
best_local_profit = profit
all_candidates.append((eq, eq_list, sector))
elif (best_profit is None or profit >= best_profit) and profit > 0:
if best_profit is None or profit > best_profit:
wanted_candidates = [eq]
best_profit = profit
else:
wanted_candidates.append(eq)
local_profit_map[sector] = best_local_profit
filtered_candidates = filter_requirements(all_candidates, equations, required, current_access)
filtered_candidates = [x for x in filtered_candidates if x[0].gross(current_access) > 0]
if len(filtered_candidates) == 0:
filtered_candidates = all_candidates
if len(filtered_candidates) == 0:
return None, None, None
if len(filtered_candidates) == 1:
return filtered_candidates[0]
neutral_candidates = [x for x in filtered_candidates if (x[0].neutral_profit() or x[0].neutral()) and x[0].profit(current_access) == local_profit_map[x[2]]]
if len(neutral_candidates) == 0:
neutral_candidates = filtered_candidates
if len(neutral_candidates) == 1:
return neutral_candidates[0]
filtered_candidates = filter_requirements(neutral_candidates, equations, required, current_access)
if len(filtered_candidates) == 0:
filtered_candidates = neutral_candidates
if len(filtered_candidates) == 1:
return filtered_candidates[0]
triplet_candidates = []
best_profit = None
for eq, eq_list, sector in filtered_candidates:
profit = eq.profit(current_access)
if best_profit is None or profit >= best_profit:
if best_profit is None or profit > best_profit:
triplet_candidates = [(eq, eq_list, sector)]
best_profit = profit
else:
triplet_candidates.append((eq, eq_list, sector))
filtered_candidates = filter_requirements(triplet_candidates, equations, required, current_access)
if len(filtered_candidates) == 0:
filtered_candidates = triplet_candidates
if len(filtered_candidates) == 1:
return filtered_candidates[0]
required_candidates = [x for x in filtered_candidates if x[0].required]
if len(required_candidates) == 0:
required_candidates = filtered_candidates
if len(required_candidates) == 1:
return required_candidates[0]
c_switch_candidates = [x for x in required_candidates if x[0].c_switch]
if len(c_switch_candidates) == 0:
c_switch_candidates = required_candidates
if len(c_switch_candidates) == 1:
return c_switch_candidates[0]
loop_candidates = find_enabling_switch_connections(current_access)
if len(loop_candidates) >= 1:
return loop_candidates[0] # just pick one
flexible_candidates = [x for x in c_switch_candidates if x[0].can_cover_cost(flex)]
if len(flexible_candidates) == 0:
flexible_candidates = c_switch_candidates
if len(flexible_candidates) == 1:
return flexible_candidates[0]
good_local_candidates = [x for x in flexible_candidates if local_profit_map[x[2]] == x[0].profit(current_access)]
if len(good_local_candidates) == 0:
good_local_candidates = flexible_candidates
if len(good_local_candidates) == 1:
return good_local_candidates[0]
leads_to_profit = [x for x in good_local_candidates if can_enable_wanted(x[0], wanted_candidates)]
if len(leads_to_profit) == 0:
leads_to_profit = good_local_candidates
if len(leads_to_profit) == 1:
return leads_to_profit[0]
cost_point = {x[0]: find_cost_point(x, current_access) for x in leads_to_profit}
best_point = max(cost_point.values())
cost_point_candidates = [x for x in leads_to_profit if cost_point[x[0]] == best_point]
if len(cost_point_candidates) == 0:
cost_point_candidates = leads_to_profit
    return cost_point_candidates[0]  # arbitrary final tie-break
def find_enabling_switch_connections(current_access):
triad_list = []
# probably should check for loop/branches in builder at some stage
# - but this could indicate that a loop or branch is necessary
for cand_door, crystal in current_access.outstanding_doors.items():
for blocked_door, req_crystal in current_access.blocked_doors.items():
if hook_from_door(cand_door) == hanger_from_door(blocked_door):
if crystal == CrystalBarrier.Either or crystal == req_crystal:
sector, equation = current_access.door_sector_map[blocked_door], None
for eq in sector.equations:
if eq.door == blocked_door:
equation = eq.copy()
break
if equation:
triad_list.append((equation, [equation], sector))
return triad_list
def find_cost_point(eq_triplet, access):
cost_point = 0
key, cost_door = eq_triplet[0].cost
if cost_door is not None:
cost_point += access.access[key] - 1
return cost_point
def find_greedy_equation(equations, access_id, current_access, sector_split):
all_candidates = []
for sector, eq_list in equations.items():
if sector not in sector_split.keys() or sector_split[sector] == access_id:
eq_list.sort(key=lambda eq: eq.profit(current_access), reverse=True)
for eq in eq_list:
if current_access.can_cover_equation(eq) and (eq.access_id is None or eq.access_id == access_id):
all_candidates.append((eq, eq_list, sector))
if len(all_candidates) == 0:
return None, None, None # can't pay for anything
if len(all_candidates) == 1:
return all_candidates[0]
filtered_candidates = [x for x in all_candidates if x[0].profit(current_access) + 2 >= len(x[2].outstanding_doors)]
if len(filtered_candidates) == 0:
filtered_candidates = all_candidates
if len(filtered_candidates) == 1:
return filtered_candidates[0]
triplet_candidates = []
worst_profit = None
for eq, eq_list, sector in filtered_candidates:
profit = eq.profit(current_access)
if worst_profit is None or profit <= worst_profit:
if worst_profit is None or profit < worst_profit:
triplet_candidates = [(eq, eq_list, sector)]
worst_profit = profit
else:
triplet_candidates.append((eq, eq_list, sector))
if len(triplet_candidates) == 0:
triplet_candidates = filtered_candidates
return triplet_candidates[0]
def calc_required(equations, current_access):
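    # When the total "budget" (current hook access plus the best achievable
    # profit in every sector) hits zero, prune strictly-worse equations and
    # mark the survivors required, then tally required costs/benefits per hook.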
ttl = sum(current_access.access.values())
local_profit_map = {}
for sector, eq_list in equations.items():
best_local_profit = None
for eq in eq_list:
profit = eq.profit(current_access)
if best_local_profit is None or profit > best_local_profit:
best_local_profit = profit
local_profit_map[sector] = best_local_profit
ttl += best_local_profit
if ttl == 0:
new_lists = {}
for sector, eq_list in equations.items():
if len(eq_list) > 1:
rem_list = []
for eq in eq_list:
if eq.profit(current_access) < local_profit_map[sector]:
rem_list.append(eq)
if len(rem_list) > 0:
new_lists[sector] = [x for x in eq_list if x not in rem_list]
for sector, eq_list in new_lists.items():
if len(eq_list) <= 1:
for eq in eq_list:
eq.required = True
equations[sector] = eq_list
required_costs = defaultdict(int)
required_benefits = defaultdict(int)
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.required:
key, door = eq.cost
required_costs[key] += 1
for key, door_list in eq.benefit.items():
required_benefits[key] += len(door_list)
return required_costs, required_benefits
def calc_flex(equations, current_access):
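    # Flex spending is whatever access remains after reserving enough to pay
    # every required equation; e.g. 3 units of a hook type with 2 required
    # costs leaves 1 flexible unit.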
flex_spending = defaultdict(int)
required_costs = defaultdict(int)
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.required:
key, door = eq.cost
required_costs[key] += 1
for key in Hook:
flex_spending[key] = max(0, current_access.access[key]-required_costs[key])
return flex_spending
def filter_requirements(triplet_candidates, equations, required, current_access):
r_costs, r_exits = required
valid_candidates = []
for cand, cand_list, cand_sector in triplet_candidates:
valid = True
if not cand.required and not cand.c_switch:
potential_benefit = defaultdict(int)
benefit_counted = set()
potential_costs = defaultdict(int)
for h_type, benefit in current_access.access.items():
cur_cost = 1 if cand.cost[0] is not None else 0
if benefit - cur_cost > 0:
potential_benefit[h_type] += benefit - cur_cost
for h_type, benefit_list in cand.benefit.items():
potential_benefit[h_type] += len(benefit_list)
for sector, eq_list in equations.items():
if sector == cand_sector:
affected_doors = [d for x in cand.benefit.values() for d in x] + [cand.cost[1]]
adj_list = [x for x in eq_list if x.door not in affected_doors]
else:
adj_list = eq_list
for eq in adj_list:
for h_type, benefit_list in eq.benefit.items():
total_benefit = set(benefit_list) - benefit_counted
potential_benefit[h_type] += len(total_benefit)
benefit_counted.update(benefit_list)
h_type, cost_door = eq.cost
potential_costs[h_type] += 1
for h_type, requirement in r_costs.items():
if requirement > 0 and potential_benefit[h_type] < requirement:
valid = False
break
if valid:
for h_type, requirement in r_exits.items():
if requirement > 0 and potential_costs[h_type] < requirement:
valid = False
break
if valid:
valid_candidates.append((cand, cand_list, cand_sector))
return valid_candidates
def can_enable_wanted(test_eq, wanted_candidates):
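    # True when paying test_eq would expose at least one door of the hook
    # type that some wanted (currently unaffordable) equation needs to pay
    # its own cost.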
for wanted in wanted_candidates:
covered = True
key, cost_door = wanted.cost
if len(test_eq.benefit[key]) < 1:
covered = False
if covered:
return True
return False
def resolve_equation(equation, eq_list, sector, access_id, current_access, equations):
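    # Pay the equation's cost and wire up its doors, then drop any sibling
    # equations whose door and benefits are already reached; equations left
    # in the sector are tagged with this access_id.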
if not current_access.can_pay(equation.cost[0]):
raise GenerationException('Cannot pay for this connection')
current_access.adjust_for_equation(equation, sector)
eq_list.remove(equation)
reached_doors = set(current_access.reached_doors)
reached_doors.update(current_access.blocked_doors.keys())
for r_eq in list(eq_list):
all_benefits_met = r_eq.door in reached_doors
for key in Hook:
fringe_list = [x for x in r_eq.benefit[key] if x not in reached_doors]
r_eq.benefit[key] = fringe_list
if len(fringe_list) > 0:
all_benefits_met = False
if all_benefits_met:
eq_list.remove(r_eq)
if len(eq_list) == 0 and sector in equations.keys():
del equations[sector]
else:
for eq in eq_list:
eq.access_id = access_id
def find_free_equation(equations):
for sector, eq_list in equations.items():
for eq in eq_list:
if eq.total_cost() <= 0:
return sector, eq_list, eq
return None, None, None
def copy_door_equations(builder, sector_list):
equations = {}
for sector in builder.sectors + sector_list:
if sector.equations is None:
sector.equations = calc_sector_equations(sector)
curr_list = equations[sector] = []
for equation in sector.equations:
curr_list.append(equation.copy())
return equations
def calc_sector_equations(sector):
equations = []
is_entrance = sector.is_entrance_sector() and not sector.destination_entrance
if is_entrance:
flagged_equations = []
for door in sector.outstanding_doors:
equation, flag = calc_door_equation(door, sector, True)
if flag:
flagged_equations.append(equation)
equations.append(equation)
for flagged_equation in flagged_equations:
for equation in equations:
for key, door_list in equation.benefit.items():
if flagged_equation.door in door_list and flagged_equation != equation:
door_list.remove(flagged_equation.door)
else:
for door in sector.outstanding_doors:
equation, flag = calc_door_equation(door, sector, False)
equations.append(equation)
return equations
def calc_door_equation(door, sector, look_for_entrance):
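    # Builds the cost/benefit equation for one door by flood-filling the
    # region graph behind it, tracking crystal-barrier state and any dungeon
    # events that gate further doors.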
if look_for_entrance and not door.blocked:
flag = sector.is_entrance_sector()
if flag:
eq = DoorEquation(door)
eq.benefit[hook_from_door(door)].append(door)
eq.required = True
eq.c_switch = door.crystal == CrystalBarrier.Either
eq.entrance_flag = True
return eq, flag
eq = DoorEquation(door)
eq.required = door.blocked or door.dead
eq.cost = (hanger_from_door(door), door)
eq.entrance_flag = sector.is_entrance_sector()
if not door.stonewall:
start_region = door.entrance.parent_region
visited = {(start_region, CrystalBarrier.Null)}
queue = deque([(start_region, CrystalBarrier.Null)])
found_events = set()
event_doors = set()
while len(queue) > 0:
region, crystal_barrier = queue.popleft()
if region.crystal_switch and crystal_barrier == CrystalBarrier.Null:
eq.c_switch = True
crystal_barrier = CrystalBarrier.Either
for loc in region.locations:
if loc.name in dungeon_events:
found_events.add(loc.name)
for d in event_doors:
if loc.name == d.req_event:
connect = d.entrance.connected_region
if connect is not None and connect.type == RegionType.Dungeon and valid_crystal(d, crystal_barrier):
cb_flag = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
cb_flag = CrystalBarrier.Null if cb_flag == CrystalBarrier.Either else cb_flag
if (connect, cb_flag) not in visited:
visited.add((connect, cb_flag))
queue.append((connect, cb_flag))
for ext in region.exits:
d = ext.door
if d is not None:
if d.controller is not None:
d = d.controller
if d is not door and d in sector.outstanding_doors and not d.blocked:
eq_list = eq.benefit[hook_from_door(d)]
if d not in eq_list:
eq_list.append(d)
crystal_barrier = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
if crystal_barrier != CrystalBarrier.Null:
if d in eq.crystal_blocked.keys() and eq.crystal_blocked[d] != crystal_barrier:
del eq.crystal_blocked[d]
else:
eq.crystal_blocked[d] = crystal_barrier
elif d.crystal == CrystalBarrier.Null:
if d in eq.crystal_blocked.keys() and eq.crystal_blocked[d] != crystal_barrier:
del eq.crystal_blocked[d]
if d.req_event is not None and d.req_event not in found_events:
event_doors.add(d)
else:
connect = ext.connected_region if ext.door.controller is None else d.entrance.parent_region
if connect is not None and connect.type == RegionType.Dungeon and valid_crystal(d, crystal_barrier):
cb_flag = crystal_barrier if d.crystal == CrystalBarrier.Null else d.crystal
cb_flag = CrystalBarrier.Null if cb_flag == CrystalBarrier.Either else cb_flag
if (connect, cb_flag) not in visited:
visited.add((connect, cb_flag))
queue.append((connect, cb_flag))
if len(eq.benefit) == 0:
eq.required = True
return eq, False
def meets_crystal_requirment(current_crystal, requirement):
if current_crystal == CrystalBarrier.Either:
return True
return current_crystal == requirement
def valid_crystal(door, current_crystal):
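    # Passes when either side is unconstrained (Null) or fully flexible
    # (Either); only two conflicting concrete crystal states are rejected.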
if door.crystal in [CrystalBarrier.Null, CrystalBarrier.Either]:
return True
if current_crystal in [CrystalBarrier.Either, CrystalBarrier.Null]:
return True
return door.crystal == current_crystal
def kth_combination(k, l, r):
if r == 0:
return []
elif len(l) == r:
return l
else:
i = ncr(len(l) - 1, r - 1)
if k < i:
return l[0:1] + kth_combination(k, l[1:], r - 1)
else:
return kth_combination(k - i, l[1:], r)
def ncr(n, r):
if r == 0:
return 1
r = min(r, n - r)
numerator = reduce(op.mul, range(n, n - r, -1), 1)
denominator = reduce(op.mul, range(1, r + 1), 1)
return int(numerator / denominator)
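# Example: kth_combination unranks r-subsets in lexicographic order, so with
# ncr(3, 2) == 3 total pairs, kth_combination(k, ['a', 'b', 'c'], 2) yields
# ['a', 'b'], ['a', 'c'], ['b', 'c'] for k = 0, 1, 2 respectively.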
dungeon_boss_sectors = {
'Hyrule Castle': [],
'Eastern Palace': ['Eastern Boss'],
'Desert Palace': ['Desert Boss'],
'Tower of Hera': ['Hera Boss'],
'Agahnims Tower': ['Tower Agahnim 1'],
'Palace of Darkness': ['PoD Boss'],
'Swamp Palace': ['Swamp Boss'],
'Skull Woods': ['Skull Boss'],
'Thieves Town': ['Thieves Blind\'s Cell', 'Thieves Boss'],
'Ice Palace': ['Ice Boss'],
'Misery Mire': ['Mire Boss'],
'Turtle Rock': ['TR Boss'],
'Ganons Tower': ['GT Agahnim 2']
}
default_dungeon_entrances = {
'Hyrule Castle': ['Hyrule Castle Lobby', 'Hyrule Castle West Lobby', 'Hyrule Castle East Lobby', 'Sewers Rat Path',
'Sanctuary'],
'Eastern Palace': ['Eastern Lobby'],
'Desert Palace': ['Desert Back Lobby', 'Desert Main Lobby', 'Desert West Lobby', 'Desert East Lobby'],
'Tower of Hera': ['Hera Lobby'],
'Agahnims Tower': ['Tower Lobby'],
'Palace of Darkness': ['PoD Lobby'],
'Swamp Palace': ['Swamp Lobby'],
'Skull Woods': ['Skull 1 Lobby', 'Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull 2 East Lobby',
'Skull 2 West Lobby', 'Skull Back Drop', 'Skull 3 Lobby'],
'Thieves Town': ['Thieves Lobby'],
'Ice Palace': ['Ice Lobby'],
'Misery Mire': ['Mire Lobby'],
'Turtle Rock': ['TR Main Lobby', 'TR Eye Bridge', 'TR Big Chest Entrance', 'TR Lazy Eyes'],
'Ganons Tower': ['GT Lobby']
}
drop_entrances = {
'Hyrule Castle': ['Sewers Rat Path'],
'Eastern Palace': [],
'Desert Palace': [],
'Tower of Hera': [],
'Agahnims Tower': [],
'Palace of Darkness': [],
'Swamp Palace': [],
'Skull Woods': ['Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull Back Drop'],
'Thieves Town': [],
'Ice Palace': [],
'Misery Mire': [],
'Turtle Rock': [],
'Ganons Tower': []
}
# todo: calculate these for ER - the multi entrance dungeons anyway
dungeon_dead_end_allowance = {
'Hyrule Castle': 6,
'Eastern Palace': 1,
'Desert Palace': 2,
'Tower of Hera': 1,
'Agahnims Tower': 1,
'Palace of Darkness': 1,
'Swamp Palace': 1,
'Skull Woods': 3, # two allowed in skull 1, 1 in skull 3, 0 in skull 2
'Thieves Town': 1,
'Ice Palace': 1,
'Misery Mire': 1,
'Turtle Rock': 2, # this assumes one overworld connection
'Ganons Tower': 1,
'Desert Palace Back': 1,
'Desert Palace Main': 1,
'Skull Woods 1': 0,
'Skull Woods 2': 0,
'Skull Woods 3': 1,
}
drop_entrances_allowance = [
'Sewers Rat Path', 'Skull Pinball', 'Skull Left Drop', 'Skull Pot Circle', 'Skull Back Drop'
]
dead_entrances = [
'TR Big Chest Entrance'
]
split_check_entrance_invalid = [
'Desert East Lobby', 'Skull 2 West Lobby'
]
dungeon_portals = {
'Hyrule Castle': ['Hyrule Castle South', 'Hyrule Castle West', 'Hyrule Castle East', 'Sanctuary'],
'Eastern Palace': ['Eastern'],
'Desert Palace': ['Desert Back', 'Desert South', 'Desert West', 'Desert East'],
'Tower of Hera': ['Hera'],
'Agahnims Tower': ['Agahnims Tower'],
'Palace of Darkness': ['Palace of Darkness'],
'Swamp Palace': ['Swamp'],
'Skull Woods': ['Skull 1', 'Skull 2 East', 'Skull 2 West', 'Skull 3'],
'Thieves Town': ['Thieves Town'],
'Ice Palace': ['Ice'],
'Misery Mire': ['Mire'],
'Turtle Rock': ['Turtle Rock Main', 'Turtle Rock Lazy Eyes', 'Turtle Rock Chest', 'Turtle Rock Eye Bridge'],
'Ganons Tower': ['Ganons Tower']
}
dungeon_drops = {
'Hyrule Castle': ['Sewers Rat Path'],
'Skull Woods': ['Skull Pot Circle', 'Skull Pinball', 'Skull Left Drop', 'Skull Back Drop'],
}
| true
| true
|
1c49cc9c62f8a2e1acc97ec88014ca7f54dbd2f8
| 861
|
py
|
Python
|
tests/test_settings.py
|
ibqn/django-graphql-jwt
|
dd92319071092bb517187904f3ac0610e8443edf
|
[
"MIT"
] | 1
|
2019-06-19T12:05:08.000Z
|
2019-06-19T12:05:08.000Z
|
tests/test_settings.py
|
ibqn/django-graphql-jwt
|
dd92319071092bb517187904f3ac0610e8443edf
|
[
"MIT"
] | 1
|
2018-11-01T05:12:50.000Z
|
2018-11-01T05:12:50.000Z
|
tests/test_settings.py
|
ibqn/django-graphql-jwt
|
dd92319071092bb517187904f3ac0610e8443edf
|
[
"MIT"
] | 1
|
2021-03-10T17:53:41.000Z
|
2021-03-10T17:53:41.000Z
|
from datetime import timedelta
from django.test import TestCase
from graphql_jwt import settings
class SettingsTests(TestCase):
def test_perform_import(self):
f = settings.perform_import(id, '')
self.assertEqual(f, id)
f = settings.perform_import('datetime.timedelta', '')
self.assertEqual(f, timedelta)
def test_import_from_string_error(self):
with self.assertRaises(ImportError):
settings.import_from_string('import.error', '')
def test_reload_settings(self):
getattr(settings.jwt_settings, 'JWT_ALGORITHM')
settings.reload_settings(setting='TEST')
self.assertTrue(settings.jwt_settings._cached_attrs)
delattr(settings.jwt_settings, '_user_settings')
settings.jwt_settings.reload()
self.assertFalse(settings.jwt_settings._cached_attrs)
| 27.774194
| 61
| 0.70964
|
from datetime import timedelta
from django.test import TestCase
from graphql_jwt import settings
class SettingsTests(TestCase):
def test_perform_import(self):
f = settings.perform_import(id, '')
self.assertEqual(f, id)
f = settings.perform_import('datetime.timedelta', '')
self.assertEqual(f, timedelta)
def test_import_from_string_error(self):
with self.assertRaises(ImportError):
settings.import_from_string('import.error', '')
def test_reload_settings(self):
getattr(settings.jwt_settings, 'JWT_ALGORITHM')
settings.reload_settings(setting='TEST')
self.assertTrue(settings.jwt_settings._cached_attrs)
delattr(settings.jwt_settings, '_user_settings')
settings.jwt_settings.reload()
self.assertFalse(settings.jwt_settings._cached_attrs)
| true
| true
|
1c49cd379a153b4789e7d9393eb5505762cb9e05
| 576
|
py
|
Python
|
services/models/mnemosyne.py
|
life-game-player/Hephaestus
|
0c695193d8d2d8c70061e2e26ec8c718544342c6
|
[
"MIT"
] | null | null | null |
services/models/mnemosyne.py
|
life-game-player/Hephaestus
|
0c695193d8d2d8c70061e2e26ec8c718544342c6
|
[
"MIT"
] | null | null | null |
services/models/mnemosyne.py
|
life-game-player/Hephaestus
|
0c695193d8d2d8c70061e2e26ec8c718544342c6
|
[
"MIT"
] | null | null | null |
import torch
def create(
host, user, passwd,
module, operator, operation, result
):
"""
Operation:
1: Create
2: Modify
3: Query
4: Delete
Result:
0: Succeeded
1: Failed
"""
conn = torch.connect(host, user, passwd, 'hephaestus')
list_sql = list()
list_sql.append(
"INSERT INTO mnemosyne(module, operator, operation, result) "
"VALUES('{}', {}, {}, {})".format(module, operator, operation, result)
)
torch.execute_list(conn, list_sql)
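# Hypothetical usage sketch (host and credentials are placeholders): log that
# operator 1 successfully (result=0) ran a query (operation=3) in the 'game'
# module:
#   create('localhost', 'root', 'secret', 'game', 1, 3, 0)
# Note the INSERT above is built with str.format, so inputs are assumed
# trusted; a parameterized query would be safer for untrusted values.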
| 22.153846
| 78
| 0.53125
|
import torch
def create(
host, user, passwd,
module, operator, operation, result
):
conn = torch.connect(host, user, passwd, 'hephaestus')
list_sql = list()
list_sql.append(
"INSERT INTO mnemosyne(module, operator, operation, result) "
"VALUES('{}', {}, {}, {})".format(module, operator, operation, result)
)
torch.execute_list(conn, list_sql)
| true
| true
|
1c49ce60fe551913416a74e91ae623292230edb4
| 2,260
|
py
|
Python
|
bclearer_boson_1_1_source/b_code/configurations/getters/boson_1_2e_k_configuration_getter_separate_names_and_instances.py
|
boro-alpha/bclearer_boson_1_1
|
15207d240fd3144b155922dc5c5d14822023026a
|
[
"MIT"
] | 1
|
2021-07-20T15:48:58.000Z
|
2021-07-20T15:48:58.000Z
|
bclearer_boson_1_1_source/b_code/configurations/getters/boson_1_2e_k_configuration_getter_separate_names_and_instances.py
|
boro-alpha/bclearer_boson_1_1
|
15207d240fd3144b155922dc5c5d14822023026a
|
[
"MIT"
] | null | null | null |
bclearer_boson_1_1_source/b_code/configurations/getters/boson_1_2e_k_configuration_getter_separate_names_and_instances.py
|
boro-alpha/bclearer_boson_1_1
|
15207d240fd3144b155922dc5c5d14822023026a
|
[
"MIT"
] | null | null | null |
from bclearer_boson_1_1_source.b_code.common_knowledge.inspire_matched_ea_objects import InspireMatchedEaObjects
from bclearer_source.b_code.common_knowledge.convention_shift_operation_types import ConventionShiftOperationTypes
from bclearer_source.b_code.configurations.bespoke_name_to_instance_configuration_objects import BespokeNameToInstanceConfigurationObjects
from bclearer_source.b_code.configurations.convention_shift_operation_configurations import ConventionShiftOperationConfigurations
def get_boson_1_2e_k1_configuration_separate_standard_names_and_instances() \
-> ConventionShiftOperationConfigurations:
convention_shift_operation_configuration = \
ConventionShiftOperationConfigurations(
convention_shift_operation_type=ConventionShiftOperationTypes.SEPARATE_STANDARD_NAMES_AND_INSTANCES,
output_universe_short_name='2e_k1_output_sep_standard_instances',
package_name='2e_k1_new_objects_sep_standard_instances')
return \
convention_shift_operation_configuration
def get_boson_1_2e_k2_configuration_bespoke_standard_names_and_instances() \
-> ConventionShiftOperationConfigurations:
list_of_configuration_objects = \
[
BespokeNameToInstanceConfigurationObjects(
matched_naming_space_type=InspireMatchedEaObjects.IDENTIFIER,
name_instance_attribute_name=InspireMatchedEaObjects.LOCAL_ID_ATTRIBUTE.object_name,
package_name='2e_k2_new_objects_sep_bespoke_instances'),
BespokeNameToInstanceConfigurationObjects(
matched_naming_space_type=InspireMatchedEaObjects.GEOGRAPHICAL_NAME,
matched_name_instance_type=InspireMatchedEaObjects.SPELLING_OF_NAME)
]
convention_shift_operation_configuration = \
ConventionShiftOperationConfigurations(
convention_shift_operation_type=ConventionShiftOperationTypes.SEPARATE_BESPOKE_NAMES_AND_INSTANCES,
output_universe_short_name='2e_k2_output_sep_bespoke_instances',
list_of_configuration_objects=list_of_configuration_objects,
package_name='2e_k2_new_objects_sep_bespoke_instances')
return \
convention_shift_operation_configuration
| 55.121951
| 138
| 0.815044
|
from bclearer_boson_1_1_source.b_code.common_knowledge.inspire_matched_ea_objects import InspireMatchedEaObjects
from bclearer_source.b_code.common_knowledge.convention_shift_operation_types import ConventionShiftOperationTypes
from bclearer_source.b_code.configurations.bespoke_name_to_instance_configuration_objects import BespokeNameToInstanceConfigurationObjects
from bclearer_source.b_code.configurations.convention_shift_operation_configurations import ConventionShiftOperationConfigurations
def get_boson_1_2e_k1_configuration_separate_standard_names_and_instances() \
-> ConventionShiftOperationConfigurations:
convention_shift_operation_configuration = \
ConventionShiftOperationConfigurations(
convention_shift_operation_type=ConventionShiftOperationTypes.SEPARATE_STANDARD_NAMES_AND_INSTANCES,
output_universe_short_name='2e_k1_output_sep_standard_instances',
package_name='2e_k1_new_objects_sep_standard_instances')
return \
convention_shift_operation_configuration
def get_boson_1_2e_k2_configuration_bespoke_standard_names_and_instances() \
-> ConventionShiftOperationConfigurations:
list_of_configuration_objects = \
[
BespokeNameToInstanceConfigurationObjects(
matched_naming_space_type=InspireMatchedEaObjects.IDENTIFIER,
name_instance_attribute_name=InspireMatchedEaObjects.LOCAL_ID_ATTRIBUTE.object_name,
package_name='2e_k2_new_objects_sep_bespoke_instances'),
BespokeNameToInstanceConfigurationObjects(
matched_naming_space_type=InspireMatchedEaObjects.GEOGRAPHICAL_NAME,
matched_name_instance_type=InspireMatchedEaObjects.SPELLING_OF_NAME)
]
convention_shift_operation_configuration = \
ConventionShiftOperationConfigurations(
convention_shift_operation_type=ConventionShiftOperationTypes.SEPARATE_BESPOKE_NAMES_AND_INSTANCES,
output_universe_short_name='2e_k2_output_sep_bespoke_instances',
list_of_configuration_objects=list_of_configuration_objects,
package_name='2e_k2_new_objects_sep_bespoke_instances')
return \
convention_shift_operation_configuration
| true
| true
|
1c49cebb0a5ba4d641d71c1a1b47be7267c38f1c
| 13,667
|
py
|
Python
|
LPIPSmodels/dist_model.py
|
HERMINDERSINGH1234/ML_Extra_Resolution_Increases
|
1fefceeab83f03fa8194cb63f78c5dbf7e90aeae
|
[
"Apache-2.0"
] | 1
|
2021-07-17T10:13:10.000Z
|
2021-07-17T10:13:10.000Z
|
LPIPSmodels/dist_model.py
|
HERMINDERSINGH1234/ML_Extra_Resolution_Increases
|
1fefceeab83f03fa8194cb63f78c5dbf7e90aeae
|
[
"Apache-2.0"
] | null | null | null |
LPIPSmodels/dist_model.py
|
HERMINDERSINGH1234/ML_Extra_Resolution_Increases
|
1fefceeab83f03fa8194cb63f78c5dbf7e90aeae
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import sys
sys.path.append('..')
sys.path.append('.')
import numpy as np
import torch
from torch import nn
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
from .base_model import BaseModel
from scipy.ndimage import zoom
import fractions
import functools
import skimage.transform
from IPython import embed
from . import networks_basic as networks
from . import util
class DistModel(BaseModel):
def name(self):
return self.model_name
def initialize(self, model='net-lin', net='alex', pnet_rand=False, pnet_tune=False, model_path=None, colorspace='Lab', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, lr=.0001, beta1=0.5, version='0.1'):
'''
INPUTS
model - ['net-lin'] for linearly calibrated network
['net'] for off-the-shelf network
['L2'] for L2 distance in Lab colorspace
['SSIM'] for ssim in RGB colorspace
net - ['squeeze','alex','vgg']
model_path - if None, will look in weights/[NET_NAME].pth
colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
use_gpu - bool - whether or not to use a GPU
printNet - bool - whether or not to print network architecture out
spatial - bool - whether to output an array containing varying distances across spatial dimensions
spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).
spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.
spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).
is_train - bool - [True] for training mode
lr - float - initial learning rate
beta1 - float - initial momentum term for adam
version - 0.1 for latest, 0.0 was original
'''
BaseModel.initialize(self, use_gpu=use_gpu)
self.model = model
self.net = net
self.use_gpu = use_gpu
self.is_train = is_train
self.spatial = spatial
self.spatial_shape = spatial_shape
self.spatial_order = spatial_order
self.spatial_factor = spatial_factor
self.model_name = '%s [%s]'%(model,net)
if(self.model == 'net-lin'): # pretrained net + linear layer
self.net = networks.PNetLin(use_gpu=use_gpu,pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,use_dropout=True,spatial=spatial,version=version)
kw = {}
if not use_gpu:
kw['map_location'] = 'cpu'
if(model_path is None):
import inspect
# model_path = './PerceptualSimilarity/weights/v%s/%s.pth'%(version,net)
model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'v%s/%s.pth'%(version,net)))
if(not is_train):
print('Loading model from: %s'%model_path)
self.net.load_state_dict(torch.load(model_path, **kw))
elif(self.model=='net'): # pretrained network
assert not self.spatial, 'spatial argument not supported yet for uncalibrated networks'
self.net = networks.PNet(use_gpu=use_gpu,pnet_type=net)
self.is_fake_net = True
elif(self.model in ['L2','l2']):
self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing
self.model_name = 'L2'
elif(self.model in ['DSSIM','dssim','SSIM','ssim']):
self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)
self.model_name = 'SSIM'
else:
raise ValueError("Model [%s] not recognized." % self.model)
self.parameters = list(self.net.parameters())
if self.is_train: # training mode
# extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
self.rankLoss = networks.BCERankingLoss(use_gpu=use_gpu)
self.parameters+=self.rankLoss.parameters
self.lr = lr
self.old_lr = lr
self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
else: # test mode
self.net.eval()
if(printNet):
print('---------- Networks initialized -------------')
networks.print_network(self.net)
print('-----------------------------------------------')
def forward_pair(self,in1,in2,retPerLayer=False):
if(retPerLayer):
return self.net.forward(in1,in2, retPerLayer=True)
else:
return self.net.forward(in1,in2)
def forward(self, in0, in1, retNumpy=True):
''' Function computes the distance between image patches in0 and in1
INPUTS
in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
retNumpy - [False] to return as torch.Tensor, [True] to return as numpy array
OUTPUT
computed distances between in0 and in1
'''
self.input_ref = in0
self.input_p0 = in1
if(self.use_gpu):
self.input_ref = self.input_ref.cuda()
self.input_p0 = self.input_p0.cuda()
self.var_ref = Variable(self.input_ref,requires_grad=True)
self.var_p0 = Variable(self.input_p0,requires_grad=True)
self.d0 = self.forward_pair(self.var_ref, self.var_p0)
self.loss_total = self.d0
def convert_output(d0):
if(retNumpy):
ans = d0.cpu().data.numpy()
if not self.spatial:
ans = ans.flatten()
else:
assert(ans.shape[0] == 1 and len(ans.shape) == 4)
return ans[0,...].transpose([1, 2, 0]) # Reshape to usual numpy image format: (height, width, channels)
return ans
else:
return d0
if self.spatial:
L = [convert_output(x) for x in self.d0]
spatial_shape = self.spatial_shape
if spatial_shape is None:
if(self.spatial_factor is None):
spatial_shape = (in0.size()[2],in0.size()[3])
else:
spatial_shape = (max([x.shape[0] for x in L])*self.spatial_factor, max([x.shape[1] for x in L])*self.spatial_factor)
L = [skimage.transform.resize(x, spatial_shape, order=self.spatial_order, mode='edge') for x in L]
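            # Scaling by len(L) before the mean is equivalent to summing the
            # per-layer distance maps.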
L = np.mean(np.concatenate(L, 2) * len(L), 2)
return L
else:
return convert_output(self.d0)
# ***** TRAINING FUNCTIONS *****
def optimize_parameters(self):
self.forward_train()
self.optimizer_net.zero_grad()
self.backward_train()
self.optimizer_net.step()
self.clamp_weights()
def clamp_weights(self):
for module in self.net.modules():
if(hasattr(module, 'weight') and module.kernel_size==(1,1)):
module.weight.data = torch.clamp(module.weight.data,min=0)
def set_input(self, data):
self.input_ref = data['ref']
self.input_p0 = data['p0']
self.input_p1 = data['p1']
self.input_judge = data['judge']
if(self.use_gpu):
self.input_ref = self.input_ref.cuda()
self.input_p0 = self.input_p0.cuda()
self.input_p1 = self.input_p1.cuda()
self.input_judge = self.input_judge.cuda()
self.var_ref = Variable(self.input_ref,requires_grad=True)
self.var_p0 = Variable(self.input_p0,requires_grad=True)
self.var_p1 = Variable(self.input_p1,requires_grad=True)
def forward_train(self): # run forward pass
self.d0 = self.forward_pair(self.var_ref, self.var_p0)
self.d1 = self.forward_pair(self.var_ref, self.var_p1)
self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)
# var_judge
self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())
self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)
return self.loss_total
def backward_train(self):
torch.mean(self.loss_total).backward()
def compute_accuracy(self,d0,d1,judge):
''' d0, d1 are Variables, judge is a Tensor '''
d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()
judge_per = judge.cpu().numpy().flatten()
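        # e.g. d0=0.4, d1=0.2 (model prefers p1) with judge=0.7 (70% of
        # evaluators preferred p1) contributes 0.7 agreement for that triplet.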
return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)
def get_current_errors(self):
retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
('acc_r', self.acc_r)])
for key in retDict.keys():
retDict[key] = np.mean(retDict[key])
return retDict
def get_current_visuals(self):
zoom_factor = 256/self.var_ref.data.size()[2]
ref_img = util.tensor2im(self.var_ref.data)
p0_img = util.tensor2im(self.var_p0.data)
p1_img = util.tensor2im(self.var_p1.data)
ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)
p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)
p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)
return OrderedDict([('ref', ref_img_vis),
('p0', p0_img_vis),
('p1', p1_img_vis)])
def save(self, path, label):
self.save_network(self.net, path, '', label)
self.save_network(self.rankLoss.net, path, 'rank', label)
def update_learning_rate(self,nepoch_decay):
lrd = self.lr / nepoch_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_net.param_groups:
param_group['lr'] = lr
        print('update lr [%s] decay: %f -> %f' % (self.model_name, self.old_lr, lr))
self.old_lr = lr
def score_2afc_dataset(data_loader,func):
''' Function computes Two Alternative Forced Choice (2AFC) score using
distance function 'func' in dataset 'data_loader'
INPUTS
data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
        func - callable distance function - d=func(in0,in1) should take two
            pytorch tensors of shape Nx3xXxY and return a numpy array of length N
OUTPUTS
[0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
[1] - dictionary with following elements
d0s,d1s - N arrays containing distances between reference patch to perturbed patches
gts - N array in [0,1], preferred patch selected by human evaluators
            (closer to "0" for left patch p0, "1" for right patch p1;
            "0.6" means 60% of evaluators preferred the right patch, 40% the left)
        scores - N array in [0,1], corresponding to how often the function agreed with the human evaluators
CONSTS
N - number of test triplets in data_loader
'''
d0s = []
d1s = []
gts = []
# bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())
for (i,data) in enumerate(data_loader.load_data()):
d0s+=func(data['ref'],data['p0']).tolist()
d1s+=func(data['ref'],data['p1']).tolist()
gts+=data['judge'].cpu().numpy().flatten().tolist()
# bar.update(i)
d0s = np.array(d0s)
d1s = np.array(d1s)
gts = np.array(gts)
scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5
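    # e.g. d0s=[0.2, 0.5], d1s=[0.4, 0.1], gts=[0.0, 1.0] gives per-triplet
    # scores [1.0, 1.0]: the function sides with the humans both times.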
return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))
def score_jnd_dataset(data_loader,func):
''' Function computes JND score using distance function 'func' in dataset 'data_loader'
INPUTS
data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
        func - callable distance function - d=func(in0,in1) should take two
            pytorch tensors of shape Nx3xXxY and return a numpy array of length N
OUTPUTS
[0] - JND score in [0,1], mAP score (area under precision-recall curve)
[1] - dictionary with following elements
ds - N array containing distances between two patches shown to human evaluator
sames - N array containing fraction of people who thought the two patches were identical
CONSTS
N - number of test triplets in data_loader
'''
ds = []
gts = []
# bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())
for (i,data) in enumerate(data_loader.load_data()):
ds+=func(data['p0'],data['p1']).tolist()
gts+=data['same'].cpu().numpy().flatten().tolist()
# bar.update(i)
sames = np.array(gts)
ds = np.array(ds)
sorted_inds = np.argsort(ds)
ds_sorted = ds[sorted_inds]
sames_sorted = sames[sorted_inds]
TPs = np.cumsum(sames_sorted)
FPs = np.cumsum(1-sames_sorted)
FNs = np.sum(sames_sorted)-TPs
precs = TPs/(TPs+FPs)
recs = TPs/(TPs+FNs)
score = util.voc_ap(recs,precs)
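    # Sketch: with ds=[0.1, 0.9] and sames=[1, 0] the humans' "same" pair
    # sorts first, precision holds at 1.0 through full recall, and the AP is
    # 1.0; each misordered pair drags the area under the curve down.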
return(score, dict(ds=ds,sames=sames))
| 41.795107
| 279
| 0.599693
|
from __future__ import absolute_import
import sys
sys.path.append('..')
sys.path.append('.')
import numpy as np
import torch
from torch import nn
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
from .base_model import BaseModel
from scipy.ndimage import zoom
import fractions
import functools
import skimage.transform
from IPython import embed
from . import networks_basic as networks
from . import util
class DistModel(BaseModel):
def name(self):
return self.model_name
def initialize(self, model='net-lin', net='alex', pnet_rand=False, pnet_tune=False, model_path=None, colorspace='Lab', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, lr=.0001, beta1=0.5, version='0.1'):
BaseModel.initialize(self, use_gpu=use_gpu)
self.model = model
self.net = net
self.use_gpu = use_gpu
self.is_train = is_train
self.spatial = spatial
self.spatial_shape = spatial_shape
self.spatial_order = spatial_order
self.spatial_factor = spatial_factor
self.model_name = '%s [%s]'%(model,net)
if(self.model == 'net-lin'):
self.net = networks.PNetLin(use_gpu=use_gpu,pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,use_dropout=True,spatial=spatial,version=version)
kw = {}
if not use_gpu:
kw['map_location'] = 'cpu'
if(model_path is None):
import inspect
model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'v%s/%s.pth'%(version,net)))
if(not is_train):
print('Loading model from: %s'%model_path)
self.net.load_state_dict(torch.load(model_path, **kw))
elif(self.model=='net'):
assert not self.spatial, 'spatial argument not supported yet for uncalibrated networks'
self.net = networks.PNet(use_gpu=use_gpu,pnet_type=net)
self.is_fake_net = True
elif(self.model in ['L2','l2']):
self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace)
self.model_name = 'L2'
elif(self.model in ['DSSIM','dssim','SSIM','ssim']):
self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)
self.model_name = 'SSIM'
else:
raise ValueError("Model [%s] not recognized." % self.model)
self.parameters = list(self.net.parameters())
if self.is_train:
self.rankLoss = networks.BCERankingLoss(use_gpu=use_gpu)
self.parameters+=self.rankLoss.parameters
self.lr = lr
self.old_lr = lr
self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
else:
self.net.eval()
if(printNet):
print('---------- Networks initialized -------------')
networks.print_network(self.net)
print('-----------------------------------------------')
def forward_pair(self,in1,in2,retPerLayer=False):
if(retPerLayer):
return self.net.forward(in1,in2, retPerLayer=True)
else:
return self.net.forward(in1,in2)
def forward(self, in0, in1, retNumpy=True):
self.input_ref = in0
self.input_p0 = in1
if(self.use_gpu):
self.input_ref = self.input_ref.cuda()
self.input_p0 = self.input_p0.cuda()
self.var_ref = Variable(self.input_ref,requires_grad=True)
self.var_p0 = Variable(self.input_p0,requires_grad=True)
self.d0 = self.forward_pair(self.var_ref, self.var_p0)
self.loss_total = self.d0
def convert_output(d0):
if(retNumpy):
ans = d0.cpu().data.numpy()
if not self.spatial:
ans = ans.flatten()
else:
assert(ans.shape[0] == 1 and len(ans.shape) == 4)
return ans[0,...].transpose([1, 2, 0])
return ans
else:
return d0
if self.spatial:
L = [convert_output(x) for x in self.d0]
spatial_shape = self.spatial_shape
if spatial_shape is None:
if(self.spatial_factor is None):
spatial_shape = (in0.size()[2],in0.size()[3])
else:
spatial_shape = (max([x.shape[0] for x in L])*self.spatial_factor, max([x.shape[1] for x in L])*self.spatial_factor)
L = [skimage.transform.resize(x, spatial_shape, order=self.spatial_order, mode='edge') for x in L]
L = np.mean(np.concatenate(L, 2) * len(L), 2)
return L
else:
return convert_output(self.d0)
def optimize_parameters(self):
self.forward_train()
self.optimizer_net.zero_grad()
self.backward_train()
self.optimizer_net.step()
self.clamp_weights()
def clamp_weights(self):
for module in self.net.modules():
if(hasattr(module, 'weight') and module.kernel_size==(1,1)):
module.weight.data = torch.clamp(module.weight.data,min=0)
def set_input(self, data):
self.input_ref = data['ref']
self.input_p0 = data['p0']
self.input_p1 = data['p1']
self.input_judge = data['judge']
if(self.use_gpu):
self.input_ref = self.input_ref.cuda()
self.input_p0 = self.input_p0.cuda()
self.input_p1 = self.input_p1.cuda()
self.input_judge = self.input_judge.cuda()
self.var_ref = Variable(self.input_ref,requires_grad=True)
self.var_p0 = Variable(self.input_p0,requires_grad=True)
self.var_p1 = Variable(self.input_p1,requires_grad=True)
def forward_train(self):
self.d0 = self.forward_pair(self.var_ref, self.var_p0)
self.d1 = self.forward_pair(self.var_ref, self.var_p1)
self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)
self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())
self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)
return self.loss_total
def backward_train(self):
torch.mean(self.loss_total).backward()
def compute_accuracy(self,d0,d1,judge):
d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()
judge_per = judge.cpu().numpy().flatten()
return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)
def get_current_errors(self):
retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
('acc_r', self.acc_r)])
for key in retDict.keys():
retDict[key] = np.mean(retDict[key])
return retDict
def get_current_visuals(self):
zoom_factor = 256/self.var_ref.data.size()[2]
ref_img = util.tensor2im(self.var_ref.data)
p0_img = util.tensor2im(self.var_p0.data)
p1_img = util.tensor2im(self.var_p1.data)
ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)
p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)
p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)
return OrderedDict([('ref', ref_img_vis),
('p0', p0_img_vis),
('p1', p1_img_vis)])
def save(self, path, label):
self.save_network(self.net, path, '', label)
self.save_network(self.rankLoss.net, path, 'rank', label)
def update_learning_rate(self,nepoch_decay):
lrd = self.lr / nepoch_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_net.param_groups:
param_group['lr'] = lr
        print('update lr [%s] decay: %f -> %f' % (self.model_name, self.old_lr, lr))
self.old_lr = lr
def score_2afc_dataset(data_loader,func):
d0s = []
d1s = []
gts = []
for (i,data) in enumerate(data_loader.load_data()):
d0s+=func(data['ref'],data['p0']).tolist()
d1s+=func(data['ref'],data['p1']).tolist()
gts+=data['judge'].cpu().numpy().flatten().tolist()
d0s = np.array(d0s)
d1s = np.array(d1s)
gts = np.array(gts)
scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5
return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))
def score_jnd_dataset(data_loader,func):
ds = []
gts = []
for (i,data) in enumerate(data_loader.load_data()):
ds+=func(data['p0'],data['p1']).tolist()
gts+=data['same'].cpu().numpy().flatten().tolist()
sames = np.array(gts)
ds = np.array(ds)
sorted_inds = np.argsort(ds)
ds_sorted = ds[sorted_inds]
sames_sorted = sames[sorted_inds]
TPs = np.cumsum(sames_sorted)
FPs = np.cumsum(1-sames_sorted)
FNs = np.sum(sames_sorted)-TPs
precs = TPs/(TPs+FPs)
recs = TPs/(TPs+FNs)
score = util.voc_ap(recs,precs)
return(score, dict(ds=ds,sames=sames))
| true
| true
|
1c49cf543130427f0a2e98a414b5ce3c3321b3df
| 6,004
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/azurestack/latest/customer_subscription.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/azurestack/latest/customer_subscription.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/azurestack/latest/customer_subscription.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['CustomerSubscription']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:azurestack:CustomerSubscription'.""", DeprecationWarning)
class CustomerSubscription(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:azurestack:CustomerSubscription'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
customer_subscription_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
registration_name: Optional[pulumi.Input[str]] = None,
resource_group: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Customer subscription.
Latest API Version: 2017-06-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] customer_subscription_name: Name of the product.
:param pulumi.Input[str] etag: The entity tag used for optimistic concurrency when modifying the resource.
:param pulumi.Input[str] registration_name: Name of the Azure Stack registration.
:param pulumi.Input[str] resource_group: Name of the resource group.
:param pulumi.Input[str] tenant_id: Tenant Id.
"""
pulumi.log.warn("CustomerSubscription is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:azurestack:CustomerSubscription'.")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['customer_subscription_name'] = customer_subscription_name
__props__['etag'] = etag
if registration_name is None and not opts.urn:
raise TypeError("Missing required property 'registration_name'")
__props__['registration_name'] = registration_name
if resource_group is None and not opts.urn:
raise TypeError("Missing required property 'resource_group'")
__props__['resource_group'] = resource_group
__props__['tenant_id'] = tenant_id
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:azurestack:CustomerSubscription"), pulumi.Alias(type_="azure-nextgen:azurestack/v20170601:CustomerSubscription"), pulumi.Alias(type_="azure-nextgen:azurestack/v20200601preview:CustomerSubscription")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(CustomerSubscription, __self__).__init__(
'azure-nextgen:azurestack/latest:CustomerSubscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'CustomerSubscription':
"""
Get an existing CustomerSubscription resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return CustomerSubscription(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
The entity tag used for optimistic concurrency when modifying the resource.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[Optional[str]]:
"""
Tenant Id.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of Resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 44.474074
| 287
| 0.659893
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['CustomerSubscription']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:azurestack:CustomerSubscription'.""", DeprecationWarning)
class CustomerSubscription(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:azurestack:CustomerSubscription'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
customer_subscription_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
registration_name: Optional[pulumi.Input[str]] = None,
resource_group: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
pulumi.log.warn("CustomerSubscription is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:azurestack:CustomerSubscription'.")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['customer_subscription_name'] = customer_subscription_name
__props__['etag'] = etag
if registration_name is None and not opts.urn:
raise TypeError("Missing required property 'registration_name'")
__props__['registration_name'] = registration_name
if resource_group is None and not opts.urn:
raise TypeError("Missing required property 'resource_group'")
__props__['resource_group'] = resource_group
__props__['tenant_id'] = tenant_id
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:azurestack:CustomerSubscription"), pulumi.Alias(type_="azure-nextgen:azurestack/v20170601:CustomerSubscription"), pulumi.Alias(type_="azure-nextgen:azurestack/v20200601preview:CustomerSubscription")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(CustomerSubscription, __self__).__init__(
'azure-nextgen:azurestack/latest:CustomerSubscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'CustomerSubscription':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return CustomerSubscription(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true
| true
|
1c49cfd95d576b090d11ac58239cd27ff7c71312
| 553
|
py
|
Python
|
scripts/RunServer.py
|
ekg/shasta
|
e2fd3c3d79fb4cafe77c62f6af2fef46f7a04b01
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/RunServer.py
|
ekg/shasta
|
e2fd3c3d79fb4cafe77c62f6af2fef46f7a04b01
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/RunServer.py
|
ekg/shasta
|
e2fd3c3d79fb4cafe77c62f6af2fef46f7a04b01
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3
import os
import shasta
import GetConfig
# Find the path to the docs directory.
thisScriptPath = os.path.realpath(__file__)
thisScriptDirectory = os.path.dirname(thisScriptPath)
thisScriptParentDirectory = os.path.dirname(thisScriptDirectory)
docsDirectory = thisScriptParentDirectory + '/docs'
# Read the config file.
config = GetConfig.getConfig()
# Initialize the assembler.
a = shasta.Assembler()
a.accessAllSoft()
a.setupConsensusCaller(config['Assembly']['consensusCaller'])
a.setDocsDirectory(docsDirectory)
a.explore()
| 22.12
| 64
| 0.793852
|
import os
import shasta
import GetConfig
thisScriptPath = os.path.realpath(__file__)
thisScriptDirectory = os.path.dirname(thisScriptPath)
thisScriptParentDirectory = os.path.dirname(thisScriptDirectory)
docsDirectory = thisScriptParentDirectory + '/docs'
config = GetConfig.getConfig()
a = shasta.Assembler()
a.accessAllSoft()
a.setupConsensusCaller(config['Assembly']['consensusCaller'])
a.setDocsDirectory(docsDirectory)
a.explore()
| true
| true
|
1c49d18f431832a15fe255123e31a03b0a805e27
| 5,159
|
py
|
Python
|
settings.py
|
garyp/djwed
|
a3cecfa77f55574fecc05621a33d0cdd20a85fb2
|
[
"MIT"
] | 1
|
2021-01-27T09:56:00.000Z
|
2021-01-27T09:56:00.000Z
|
settings.py
|
garyp/djwed
|
a3cecfa77f55574fecc05621a33d0cdd20a85fb2
|
[
"MIT"
] | null | null | null |
settings.py
|
garyp/djwed
|
a3cecfa77f55574fecc05621a33d0cdd20a85fb2
|
[
"MIT"
] | null | null | null |
# Django settings for djwed project.
import logging
from socket import gethostname
production = False
# This is the production hostname (as I'd develop on a desktop and then run
# the system on a colo server.)
if 'my-colo-server' == gethostname().split(".")[0]:
production = True
if not production:
DEBUG = True
else:
DEBUG = False
TEMPLATE_DEBUG = DEBUG
logging.basicConfig(level=logging.INFO)
ADMINS = (
('Ben Bitdiddle', 'benb@example.org'),
)
MANAGERS = (
('Ben Bitdiddle', 'benb@example.org'),
('Alyssa P Hacker', 'aphacker@example.org'),
)
# Default From email address used by djwed when sending emails, with a display
# name as well
FROM_EMAIL = ('Alyssa & Ben', 'ab@example.org')
# Default From email address used by Django when emailing errors and
# notifications
DEFAULT_FROM_EMAIL = SERVER_EMAIL = FROM_EMAIL[1]
WEDDING_NAMES = 'Alyssa and Ben'
# By keeping the database in a sqlite3 file, I was able to check it into
# subversion and easily make copies to the staging environment.
# The performance of sqlite3 was just fine for a single-wedding environment
# as this was intended for.
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
# Path to the base of the code, as well as to the database file if using sqlite3.
# Change these file paths as appropriate.
if production:
WEDDING_BASE = '/www/wedding'
DATABASE_NAME = WEDDING_BASE + '/data/weddingdata.sqlite'
else:
WEDDING_BASE = '/u/media/project/wedding/website/djwed'
DATABASE_NAME = WEDDING_BASE + '/data/weddingdata-test.sqlite'
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
EMAIL_HOST = 'localhost'
if production:
SEND_EMAIL = True
EMAIL_PORT = 25
else:
SEND_EMAIL = False
EMAIL_PORT = 1025
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'US/Eastern'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Authentication backend classes (as strings) to use when attempting to authenticate a user.
AUTHENTICATION_BACKENDS = (
'djwed.wedding.auth.InviteeAuthBackend',
'django.contrib.auth.backends.ModelBackend'
)
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = WEDDING_BASE + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
if production:
MEDIA_URL = 'http://wedding.example.org/media/'
else:
MEDIA_URL = 'http://localhost:8000/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'CHANGE_ME_TO_SOME_OTHER_RANDOM_STRING!!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# Needs to be at the end
'django.contrib.csrf.middleware.CsrfMiddleware',
)
ROOT_URLCONF = 'djwed.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
WEDDING_BASE + '/templates/',
WEDDING_BASE + '/photologue/templates/photologue/templates/',
WEDDING_BASE + '/photologue/templates/',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
#"django.contrib.messages.context_processors.messages"
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'djwed.wedding',
'tagging',
'photologue',
'south',
)
| 32.043478
| 101
| 0.724559
|
import logging
from socket import gethostname
production = False
# This is the production hostname (as I'd develop on a desktop and then run
# the system on a colo server.)
if 'my-colo-server' == gethostname().split(".")[0]:
production = True
if not production:
DEBUG = True
else:
DEBUG = False
TEMPLATE_DEBUG = DEBUG
logging.basicConfig(level=logging.INFO)
ADMINS = (
('Ben Bitdiddle', 'benb@example.org'),
)
MANAGERS = (
('Ben Bitdiddle', 'benb@example.org'),
('Alyssa P Hacker', 'aphacker@example.org'),
)
# Default From email address used by djwed when sending emails, with a display
# name as well
FROM_EMAIL = ('Alyssa & Ben', 'ab@example.org')
# Default From email address used by Django when emailing errors and
# notifications
DEFAULT_FROM_EMAIL = SERVER_EMAIL = FROM_EMAIL[1]
WEDDING_NAMES = 'Alyssa and Ben'
# By keeping the database in a sqlite3 file, I was able to check it into
# subversion and easily make copies to the staging environment.
# The performance of sqlite3 was just fine for a single-wedding environment
# as this was intended for.
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
# Path to base of code, as well to database file if using sqlite3.
# Change these file paths as appropriate.
if production:
WEDDING_BASE = '/www/wedding'
DATABASE_NAME = WEDDING_BASE + '/data/weddingdata.sqlite'
else:
WEDDING_BASE = '/u/media/project/wedding/website/djwed'
DATABASE_NAME = WEDDING_BASE + '/data/weddingdata-test.sqlite'
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
EMAIL_HOST = 'localhost'
if production:
SEND_EMAIL = True
EMAIL_PORT = 25
else:
SEND_EMAIL = False
EMAIL_PORT = 1025
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'US/Eastern'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Authentication backend classes (as strings) to use when attempting to authenticate a user.
AUTHENTICATION_BACKENDS = (
'djwed.wedding.auth.InviteeAuthBackend',
'django.contrib.auth.backends.ModelBackend'
)
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = WEDDING_BASE + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
if production:
MEDIA_URL = 'http://wedding.example.org/media/'
else:
MEDIA_URL = 'http://localhost:8000/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'CHANGE_ME_TO_SOME_OTHER_RANDOM_STRING!!'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.csrf.middleware.CsrfMiddleware',
)
ROOT_URLCONF = 'djwed.urls'
TEMPLATE_DIRS = (
WEDDING_BASE + '/templates/',
WEDDING_BASE + '/photologue/templates/photologue/templates/',
WEDDING_BASE + '/photologue/templates/',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
#"django.contrib.messages.context_processors.messages"
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'djwed.wedding',
'tagging',
'photologue',
'south',
)
| true
| true
|
1c49d22b406a20082fc4ddf2a42ae03c8ebb4cf3
| 1,325
|
py
|
Python
|
spanner/google/cloud/spanner.py
|
bomboradata/bombora-google-cloud-python
|
255bbebe6c50490f40fcc3eed40bae1e77e03859
|
[
"Apache-2.0"
] | null | null | null |
spanner/google/cloud/spanner.py
|
bomboradata/bombora-google-cloud-python
|
255bbebe6c50490f40fcc3eed40bae1e77e03859
|
[
"Apache-2.0"
] | null | null | null |
spanner/google/cloud/spanner.py
|
bomboradata/bombora-google-cloud-python
|
255bbebe6c50490f40fcc3eed40bae1e77e03859
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Spanner API package."""
from __future__ import absolute_import
from google.cloud.spanner_v1 import __version__
from google.cloud.spanner_v1 import AbstractSessionPool
from google.cloud.spanner_v1 import BurstyPool
from google.cloud.spanner_v1 import Client
from google.cloud.spanner_v1 import enums
from google.cloud.spanner_v1 import FixedSizePool
from google.cloud.spanner_v1 import KeyRange
from google.cloud.spanner_v1 import KeySet
from google.cloud.spanner_v1 import param_types
from google.cloud.spanner_v1 import types
__all__ = (
'__version__',
'AbstractSessionPool',
'BurstyPool',
'Client',
'enums',
'FixedSizePool',
'KeyRange',
'KeySet',
'param_types',
'types',
)
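# A minimal usage sketch for the re-exported client (the project, instance,
# and database names are placeholders and are assumed to already exist):
#
#   from google.cloud import spanner
#
#   client = spanner.Client(project='my-project')
#   database = client.instance('my-instance').database('my-db')
#   with database.snapshot() as snapshot:
#       for row in snapshot.execute_sql('SELECT 1'):
#           print(row)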
| 30.813953
| 74
| 0.767547
|
from __future__ import absolute_import
from google.cloud.spanner_v1 import __version__
from google.cloud.spanner_v1 import AbstractSessionPool
from google.cloud.spanner_v1 import BurstyPool
from google.cloud.spanner_v1 import Client
from google.cloud.spanner_v1 import enums
from google.cloud.spanner_v1 import FixedSizePool
from google.cloud.spanner_v1 import KeyRange
from google.cloud.spanner_v1 import KeySet
from google.cloud.spanner_v1 import param_types
from google.cloud.spanner_v1 import types
__all__ = (
'__version__',
'AbstractSessionPool',
'BurstyPool',
'Client',
'enums',
'FixedSizePool',
'KeyRange',
'KeySet',
'param_types',
'types',
)
| true
| true
|
1c49d2430b558f5439579e9257bf1022eef95d92
| 583
|
py
|
Python
|
script.plexodus/service.py
|
MR-Unknown-Cm/addons
|
8df1ebe58c95620bb02a05dbae7bf37954915cbd
|
[
"Apache-2.0"
] | 1
|
2020-03-03T10:01:21.000Z
|
2020-03-03T10:01:21.000Z
|
script.plexodus/service.py
|
MR-Unknown-Cm/addons
|
8df1ebe58c95620bb02a05dbae7bf37954915cbd
|
[
"Apache-2.0"
] | null | null | null |
script.plexodus/service.py
|
MR-Unknown-Cm/addons
|
8df1ebe58c95620bb02a05dbae7bf37954915cbd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import xbmc
import xbmcgui
import xbmcaddon
def main():
if xbmc.getInfoLabel('Window(10000).Property(script.plexodus.service.started)'):
# Prevent add-on updates from starting a new version of the addon
return
xbmcgui.Window(10000).setProperty('script.plexodus.service.started', '1')
if xbmcaddon.Addon().getSetting('kiosk.mode') == 'true':
xbmc.log('script.plexodus: Starting from service (Kiosk Mode)', xbmc.LOGNOTICE)
xbmc.executebuiltin('RunScript(script.plexodus)')
if __name__ == '__main__':
main()
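# The window-10000 property acts as a process-wide "already started" flag that
# survives add-on updates. A hedged sketch of how one might clear it by hand
# while testing (Window.clearProperty is part of the Kodi API; doing this from
# an interactive console is an assumption about the debugging workflow):
#
#   import xbmcgui
#   xbmcgui.Window(10000).clearProperty('script.plexodus.service.started')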
| 27.761905
| 87
| 0.686106
|
import xbmc
import xbmcgui
import xbmcaddon
def main():
if xbmc.getInfoLabel('Window(10000).Property(script.plexodus.service.started)'):
return
xbmcgui.Window(10000).setProperty('script.plexodus.service.started', '1')
if xbmcaddon.Addon().getSetting('kiosk.mode') == 'true':
xbmc.log('script.plexodus: Starting from service (Kiosk Mode)', xbmc.LOGNOTICE)
xbmc.executebuiltin('RunScript(script.plexodus)')
if __name__ == '__main__':
main()
| true
| true
|
1c49d2ea6b5f5d7265c638d9c946745c6efc2c2a
| 1,696
|
py
|
Python
|
examples/p3/Ball.py
|
djpeach/pygamer
|
77a0cdab58bc29d06cc88c8cc823850794fe0bf0
|
[
"MIT"
] | null | null | null |
examples/p3/Ball.py
|
djpeach/pygamer
|
77a0cdab58bc29d06cc88c8cc823850794fe0bf0
|
[
"MIT"
] | null | null | null |
examples/p3/Ball.py
|
djpeach/pygamer
|
77a0cdab58bc29d06cc88c8cc823850794fe0bf0
|
[
"MIT"
] | null | null | null |
import pygame
import pygamer
import time
class Ball(pygamer.Object):
def __init__(self, speed, color, radius, center_position):
rect = pygame.rect.Rect(center_position[0] - radius, center_position[1] - radius, radius * 2, radius * 2)
super().__init__(rect, speed)
self.color = color
self.radius = radius
self.diameter = radius * 2
        self.initial_position = center_position
self.scored = False
    def check_bounds(self, screen):
        x, y = self.speed
        # Leaving through the left or right edge means a point was scored:
        # pin the ball at that edge, stop it, and flag the score.
        if self.left < screen.left:
            self.rect = pygame.rect.Rect(0, self.top, self.width, self.height)
            self.speed = (0, 0)
            self.scored = True
        elif self.right > screen.right:
            self.rect = pygame.rect.Rect(screen.right - self.width, self.top, self.width, self.height)
            self.speed = (0, 0)
            self.scored = True
        # Hitting the top or bottom edge just reflects the vertical speed.
        elif self.top < screen.top:
            self.rect = pygame.rect.Rect(self.left, 0, self.width, self.height)
            self.speed = (x, -y)
        elif self.bottom > screen.bottom:
            self.rect = pygame.rect.Rect(self.left, screen.bottom - self.height, self.width, self.height)
            self.speed = (x, -y)
def check_paddle_collisions(self, paddle, reset_x):
x, y = self.speed
if paddle.rect.colliderect(self):
if abs(x) < 15:
x *= 1.3
self.rect = pygame.rect.Rect(reset_x, self.top, self.width, self.height)
self.speed = (-x, y)
def draw(self, surface_to_draw_on):
pygame.draw.circle(surface_to_draw_on, self.color, (self.rect.x + self.radius, self.rect.y + self.radius), self.radius)
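# A hedged usage sketch of the class above (assumes pygamer.Object exposes the
# rect-style attributes used here and that a paddle object carries a `.rect`;
# `left_paddle` and `surface` are placeholders, and the numbers are
# illustrative):
#
#   screen_rect = pygame.rect.Rect(0, 0, 800, 600)
#   ball = Ball(speed=(5, 3), color=(255, 255, 255), radius=10,
#               center_position=(400, 300))
#   # one frame of the game loop:
#   ball.check_bounds(screen_rect)
#   ball.check_paddle_collisions(left_paddle, reset_x=left_paddle.rect.right)
#   ball.draw(surface)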
| 39.44186
| 127
| 0.598467
|
import pygame
import pygamer
import time
class Ball(pygamer.Object):
def __init__(self, speed, color, radius, center_position):
rect = pygame.rect.Rect(center_position[0] - radius, center_position[1] - radius, radius * 2, radius * 2)
super().__init__(rect, speed)
self.color = color
self.radius = radius
self.diameter = radius * 2
        self.initial_position = center_position
self.scored = False
def check_bounds(self, screen):
x, y = self.speed
if self.left < screen.left:
self.rect = pygame.rect.Rect(0, self.top, self.width, self.height)
self.speed = (0, 0)
self.scored = True
elif self.right > screen.right:
self.rect = pygame.rect.Rect(screen.right - self.width, self.top, self.width, self.height)
self.speed = (0, 0)
self.scored = True
elif self.top < screen.top:
self.rect = pygame.rect.Rect(self.left, 0, self.width, self.height)
self.speed = (x, -y)
elif self.bottom > screen.bottom:
self.rect = pygame.rect.Rect(self.left, screen.bottom - self.height, self.width, self.height)
self.speed = (x, -y)
def check_paddle_collisions(self, paddle, reset_x):
x, y = self.speed
if paddle.rect.colliderect(self):
if abs(x) < 15:
x *= 1.3
self.rect = pygame.rect.Rect(reset_x, self.top, self.width, self.height)
self.speed = (-x, y)
def draw(self, surface_to_draw_on):
pygame.draw.circle(surface_to_draw_on, self.color, (self.rect.x + self.radius, self.rect.y + self.radius), self.radius)
| true
| true
|
1c49d35a318a1a44c92df86786fbdadfd07a7a15
| 196
|
py
|
Python
|
detectron/lib/python3.6/site-packages/torchvision/version.py
|
JustinBear99/Mask_RCNN
|
d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff
|
[
"Apache-2.0"
] | null | null | null |
detectron/lib/python3.6/site-packages/torchvision/version.py
|
JustinBear99/Mask_RCNN
|
d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff
|
[
"Apache-2.0"
] | null | null | null |
detectron/lib/python3.6/site-packages/torchvision/version.py
|
JustinBear99/Mask_RCNN
|
d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff
|
[
"Apache-2.0"
] | null | null | null |
__version__ = '0.6.1'
git_version = '35d732ac53aebbed917993523d685b4cb09ef6ea'
from torchvision.extension import _check_cuda_version
if _check_cuda_version() > 0:
cuda = _check_cuda_version()
| 32.666667
| 56
| 0.811224
|
__version__ = '0.6.1'
git_version = '35d732ac53aebbed917993523d685b4cb09ef6ea'
from torchvision.extension import _check_cuda_version
if _check_cuda_version() > 0:
cuda = _check_cuda_version()
| true
| true
|
1c49d3ed3d448eb8beffb819f40560927ff5b27b
| 12,269
|
py
|
Python
|
testtools/tests/test_run.py
|
sparkiegeek/testtools
|
f86658ac18521db4254e7292c4a4dda6017d930e
|
[
"MIT"
] | null | null | null |
testtools/tests/test_run.py
|
sparkiegeek/testtools
|
f86658ac18521db4254e7292c4a4dda6017d930e
|
[
"MIT"
] | null | null | null |
testtools/tests/test_run.py
|
sparkiegeek/testtools
|
f86658ac18521db4254e7292c4a4dda6017d930e
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Tests for the test runner logic."""
import doctest
import io
from unittest import TestSuite
import sys
from textwrap import dedent
from extras import try_import
fixtures = try_import('fixtures')
testresources = try_import('testresources')
import unittest
import testtools
from testtools import TestCase, run, skipUnless
from testtools.compat import (
_b,
)
from testtools.matchers import (
Contains,
DocTestMatches,
MatchesRegex,
)
if fixtures:
class SampleTestFixture(fixtures.Fixture):
"""Creates testtools.runexample temporarily."""
def __init__(self, broken=False):
"""Create a SampleTestFixture.
:param broken: If True, the sample file will not be importable.
"""
if not broken:
init_contents = _b("""\
from testtools import TestCase
class TestFoo(TestCase):
def test_bar(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
""")
else:
init_contents = b"class not in\n"
self.package = fixtures.PythonPackage(
'runexample', [('__init__.py', init_contents)])
def setUp(self):
super().setUp()
self.useFixture(self.package)
testtools.__path__.append(self.package.base)
self.addCleanup(testtools.__path__.remove, self.package.base)
self.addCleanup(sys.modules.pop, 'testtools.runexample', None)
if fixtures and testresources:
class SampleResourcedFixture(fixtures.Fixture):
"""Creates a test suite that uses testresources."""
def __init__(self):
super().__init__()
self.package = fixtures.PythonPackage(
'resourceexample', [('__init__.py', _b("""
from fixtures import Fixture
from testresources import (
FixtureResource,
OptimisingTestSuite,
ResourcedTestCase,
)
from testtools import TestCase
class Printer(Fixture):
def setUp(self):
super(Printer, self).setUp()
print('Setting up Printer')
def reset(self):
pass
class TestFoo(TestCase, ResourcedTestCase):
# When run, this will print just one Setting up Printer, unless the
# OptimisingTestSuite is not honoured, when one per test case will print.
resources=[('res', FixtureResource(Printer()))]
def test_bar(self):
pass
def test_foo(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__))
"""))])
def setUp(self):
super().setUp()
self.useFixture(self.package)
self.addCleanup(testtools.__path__.remove, self.package.base)
testtools.__path__.append(self.package.base)
if fixtures:
class SampleLoadTestsPackage(fixtures.Fixture):
"""Creates a test suite package using load_tests."""
def __init__(self):
super().__init__()
self.package = fixtures.PythonPackage(
'discoverexample', [('__init__.py', _b("""
from testtools import TestCase, clone_test_with_new_id
class TestExample(TestCase):
def test_foo(self):
pass
def load_tests(loader, tests, pattern):
tests.addTest(clone_test_with_new_id(tests._tests[1]._tests[0], "fred"))
return tests
"""))])
def setUp(self):
super().setUp()
self.useFixture(self.package)
self.addCleanup(sys.path.remove, self.package.base)
class TestRun(TestCase):
def setUp(self):
super().setUp()
if fixtures is None:
self.skipTest("Need fixtures")
def test_run_custom_list(self):
self.useFixture(SampleTestFixture())
tests = []
class CaptureList(run.TestToolsTestRunner):
def list(self, test):
tests.append({case.id() for case
in testtools.testsuite.iterate_tests(test)})
out = io.StringIO()
try:
program = run.TestProgram(
argv=['prog', '-l', 'testtools.runexample.test_suite'],
stdout=out, testRunner=CaptureList)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual([{'testtools.runexample.TestFoo.test_bar',
'testtools.runexample.TestFoo.test_quux'}], tests)
def test_run_list_with_loader(self):
# list() is attempted with a loader first.
self.useFixture(SampleTestFixture())
tests = []
class CaptureList(run.TestToolsTestRunner):
def list(self, test, loader=None):
tests.append({case.id() for case
in testtools.testsuite.iterate_tests(test)})
tests.append(loader)
out = io.StringIO()
try:
program = run.TestProgram(
argv=['prog', '-l', 'testtools.runexample.test_suite'],
stdout=out, testRunner=CaptureList)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual([{'testtools.runexample.TestFoo.test_bar',
'testtools.runexample.TestFoo.test_quux'}, program.testLoader],
tests)
def test_run_list(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
try:
run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
testtools.runexample.TestFoo.test_quux
""", out.getvalue())
def test_run_list_failed_import(self):
broken = self.useFixture(SampleTestFixture(broken=True))
out = io.StringIO()
# XXX: http://bugs.python.org/issue22811
unittest.defaultTestLoader._top_level_dir = None
exc = self.assertRaises(
SystemExit,
run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out)
self.assertEqual(2, exc.args[0])
self.assertThat(out.getvalue(), DocTestMatches("""\
unittest.loader._FailedTest.runexample
Failed to import test module: runexample
Traceback (most recent call last):
File ".../loader.py", line ..., in _find_test_path
package = self._get_module_from_name(name)
File ".../loader.py", line ..., in _get_module_from_name
__import__(name)
File ".../runexample/__init__.py", line 1
class not in
...^...
SyntaxError: invalid syntax
""", doctest.ELLIPSIS))
def test_run_orders_tests(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
# We load two tests - one that exists and one that doesn't, and we
# should get the one that exists and neither the one that doesn't nor
# the unmentioned one that does.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
"""))
finally:
f.close()
try:
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError(
"-l --load-list tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_run_load_list(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
# We load two tests - one that exists and one that doesn't, and we
# should get the one that exists and neither the one that doesn't nor
# the unmentioned one that does.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
"""))
finally:
f.close()
try:
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError(
"-l --load-list tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_load_list_preserves_custom_suites(self):
if testresources is None:
self.skipTest("Need testresources")
self.useFixture(SampleResourcedFixture())
# We load two tests, not loading one. Both share a resource, so we
# should see just one resource setup occur.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.resourceexample.TestFoo.test_bar
testtools.resourceexample.TestFoo.test_foo
"""))
finally:
f.close()
stdout = self.useFixture(fixtures.StringStream('stdout'))
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
try:
run.main(['prog', '--load-list', tempname,
'testtools.resourceexample.test_suite'], stdout.stream)
except SystemExit:
# Evil resides in TestProgram.
pass
out = stdout.getDetails()['stdout'].as_text()
self.assertEqual(1, out.count('Setting up Printer'), "%r" % out)
def test_run_failfast(self):
stdout = self.useFixture(fixtures.StringStream('stdout'))
class Failing(TestCase):
def test_a(self):
self.fail('a')
def test_b(self):
self.fail('b')
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
runner = run.TestToolsTestRunner(failfast=True)
runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
self.assertThat(
stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
def test_run_locals(self):
stdout = self.useFixture(fixtures.StringStream('stdout'))
class Failing(TestCase):
def test_a(self):
a = 1
self.fail('a')
runner = run.TestToolsTestRunner(tb_locals=True, stdout=stdout.stream)
runner.run(Failing('test_a'))
self.assertThat(
stdout.getDetails()['stdout'].as_text(), Contains('a = 1'))
def test_stdout_honoured(self):
self.useFixture(SampleTestFixture())
tests = []
out = io.StringIO()
exc = self.assertRaises(SystemExit, run.main,
argv=['prog', 'testtools.runexample.test_suite'],
stdout=out)
self.assertEqual((0,), exc.args)
self.assertThat(
out.getvalue(),
MatchesRegex("""Tests running...
Ran 2 tests in \\d.\\d\\d\\ds
OK
"""))
@skipUnless(fixtures, "fixtures not present")
def test_issue_16662(self):
# unittest's discover implementation didn't handle load_tests on
# packages. That is fixed pending commit, but we want to offer it
# to all testtools users regardless of Python version.
# See http://bugs.python.org/issue16662
pkg = self.useFixture(SampleLoadTestsPackage())
out = io.StringIO()
# XXX: http://bugs.python.org/issue22811
unittest.defaultTestLoader._top_level_dir = None
self.assertEqual(None, run.main(
['prog', 'discover', '-l', pkg.package.base], out))
self.assertEqual(dedent("""\
discoverexample.TestExample.test_foo
fred
"""), out.getvalue())
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
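# The tests above drive testtools' runner through run.main()/run.TestProgram.
# The command-line invocations they simulate look roughly like this (module
# paths refer to the sample fixtures created above):
#
#   python -m testtools.run -l testtools.runexample.test_suite
#   python -m testtools.run -l --load-list tests.list testtools.runexample.test_suite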
| 34.175487
| 83
| 0.615454
|
import doctest
import io
from unittest import TestSuite
import sys
from textwrap import dedent
from extras import try_import
fixtures = try_import('fixtures')
testresources = try_import('testresources')
import unittest
import testtools
from testtools import TestCase, run, skipUnless
from testtools.compat import (
_b,
)
from testtools.matchers import (
Contains,
DocTestMatches,
MatchesRegex,
)
if fixtures:
class SampleTestFixture(fixtures.Fixture):
def __init__(self, broken=False):
if not broken:
init_contents = _b("""\
from testtools import TestCase
class TestFoo(TestCase):
def test_bar(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
""")
else:
init_contents = b"class not in\n"
self.package = fixtures.PythonPackage(
'runexample', [('__init__.py', init_contents)])
def setUp(self):
super().setUp()
self.useFixture(self.package)
testtools.__path__.append(self.package.base)
self.addCleanup(testtools.__path__.remove, self.package.base)
self.addCleanup(sys.modules.pop, 'testtools.runexample', None)
if fixtures and testresources:
class SampleResourcedFixture(fixtures.Fixture):
def __init__(self):
super().__init__()
self.package = fixtures.PythonPackage(
'resourceexample', [('__init__.py', _b("""
from fixtures import Fixture
from testresources import (
FixtureResource,
OptimisingTestSuite,
ResourcedTestCase,
)
from testtools import TestCase
class Printer(Fixture):
def setUp(self):
super(Printer, self).setUp()
print('Setting up Printer')
def reset(self):
pass
class TestFoo(TestCase, ResourcedTestCase):
# When run, this will print just one Setting up Printer, unless the
# OptimisingTestSuite is not honoured, when one per test case will print.
resources=[('res', FixtureResource(Printer()))]
def test_bar(self):
pass
def test_foo(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__))
"""))])
def setUp(self):
super().setUp()
self.useFixture(self.package)
self.addCleanup(testtools.__path__.remove, self.package.base)
testtools.__path__.append(self.package.base)
if fixtures:
class SampleLoadTestsPackage(fixtures.Fixture):
def __init__(self):
super().__init__()
self.package = fixtures.PythonPackage(
'discoverexample', [('__init__.py', _b("""
from testtools import TestCase, clone_test_with_new_id
class TestExample(TestCase):
def test_foo(self):
pass
def load_tests(loader, tests, pattern):
tests.addTest(clone_test_with_new_id(tests._tests[1]._tests[0], "fred"))
return tests
"""))])
def setUp(self):
super().setUp()
self.useFixture(self.package)
self.addCleanup(sys.path.remove, self.package.base)
class TestRun(TestCase):
def setUp(self):
super().setUp()
if fixtures is None:
self.skipTest("Need fixtures")
def test_run_custom_list(self):
self.useFixture(SampleTestFixture())
tests = []
class CaptureList(run.TestToolsTestRunner):
def list(self, test):
tests.append({case.id() for case
in testtools.testsuite.iterate_tests(test)})
out = io.StringIO()
try:
program = run.TestProgram(
argv=['prog', '-l', 'testtools.runexample.test_suite'],
stdout=out, testRunner=CaptureList)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual([{'testtools.runexample.TestFoo.test_bar',
'testtools.runexample.TestFoo.test_quux'}], tests)
def test_run_list_with_loader(self):
self.useFixture(SampleTestFixture())
tests = []
class CaptureList(run.TestToolsTestRunner):
def list(self, test, loader=None):
tests.append({case.id() for case
in testtools.testsuite.iterate_tests(test)})
tests.append(loader)
out = io.StringIO()
try:
program = run.TestProgram(
argv=['prog', '-l', 'testtools.runexample.test_suite'],
stdout=out, testRunner=CaptureList)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual([{'testtools.runexample.TestFoo.test_bar',
'testtools.runexample.TestFoo.test_quux'}, program.testLoader],
tests)
def test_run_list(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
try:
run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError("-l tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
testtools.runexample.TestFoo.test_quux
""", out.getvalue())
def test_run_list_failed_import(self):
broken = self.useFixture(SampleTestFixture(broken=True))
out = io.StringIO()
unittest.defaultTestLoader._top_level_dir = None
exc = self.assertRaises(
SystemExit,
run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out)
self.assertEqual(2, exc.args[0])
self.assertThat(out.getvalue(), DocTestMatches("""\
unittest.loader._FailedTest.runexample
Failed to import test module: runexample
Traceback (most recent call last):
File ".../loader.py", line ..., in _find_test_path
package = self._get_module_from_name(name)
File ".../loader.py", line ..., in _get_module_from_name
__import__(name)
File ".../runexample/__init__.py", line 1
class not in
...^...
SyntaxError: invalid syntax
""", doctest.ELLIPSIS))
def test_run_orders_tests(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
        # We load two tests - one that exists and one that doesn't, and we
        # should get the one that exists and neither the one that doesn't nor
        # the unmentioned one that does.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
"""))
finally:
f.close()
try:
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError(
"-l --load-list tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_run_load_list(self):
self.useFixture(SampleTestFixture())
out = io.StringIO()
        # We load two tests - one that exists and one that doesn't, and we
        # should get the one that exists and neither the one that doesn't nor
        # the unmentioned one that does.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
"""))
finally:
f.close()
try:
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
except SystemExit:
exc_info = sys.exc_info()
raise AssertionError(
"-l --load-list tried to exit. %r" % exc_info[1])
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_load_list_preserves_custom_suites(self):
if testresources is None:
self.skipTest("Need testresources")
self.useFixture(SampleResourcedFixture())
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.resourceexample.TestFoo.test_bar
testtools.resourceexample.TestFoo.test_foo
"""))
finally:
f.close()
stdout = self.useFixture(fixtures.StringStream('stdout'))
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
try:
run.main(['prog', '--load-list', tempname,
'testtools.resourceexample.test_suite'], stdout.stream)
except SystemExit:
pass
out = stdout.getDetails()['stdout'].as_text()
self.assertEqual(1, out.count('Setting up Printer'), "%r" % out)
def test_run_failfast(self):
stdout = self.useFixture(fixtures.StringStream('stdout'))
class Failing(TestCase):
def test_a(self):
self.fail('a')
def test_b(self):
self.fail('b')
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
runner = run.TestToolsTestRunner(failfast=True)
runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
self.assertThat(
stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
def test_run_locals(self):
stdout = self.useFixture(fixtures.StringStream('stdout'))
class Failing(TestCase):
def test_a(self):
a = 1
self.fail('a')
runner = run.TestToolsTestRunner(tb_locals=True, stdout=stdout.stream)
runner.run(Failing('test_a'))
self.assertThat(
stdout.getDetails()['stdout'].as_text(), Contains('a = 1'))
def test_stdout_honoured(self):
self.useFixture(SampleTestFixture())
tests = []
out = io.StringIO()
exc = self.assertRaises(SystemExit, run.main,
argv=['prog', 'testtools.runexample.test_suite'],
stdout=out)
self.assertEqual((0,), exc.args)
self.assertThat(
out.getvalue(),
MatchesRegex("""Tests running...
Ran 2 tests in \\d.\\d\\d\\ds
OK
"""))
@skipUnless(fixtures, "fixtures not present")
def test_issue_16662(self):
pkg = self.useFixture(SampleLoadTestsPackage())
out = io.StringIO()
unittest.defaultTestLoader._top_level_dir = None
self.assertEqual(None, run.main(
['prog', 'discover', '-l', pkg.package.base], out))
self.assertEqual(dedent("""\
discoverexample.TestExample.test_foo
fred
"""), out.getvalue())
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
| true
| true
|
1c49d5612454adebb23817577e69371c43f1abab
| 17,792
|
py
|
Python
|
coap/coapTransmitter.py
|
TimothyClaeys/coap
|
02487f06980c5c434fcf7efc0f04a97a081e1f13
|
[
"BSD-3-Clause"
] | 53
|
2015-03-04T19:41:29.000Z
|
2021-09-27T18:39:52.000Z
|
coap/coapTransmitter.py
|
TimothyClaeys/coap
|
02487f06980c5c434fcf7efc0f04a97a081e1f13
|
[
"BSD-3-Clause"
] | 7
|
2016-05-18T15:49:43.000Z
|
2019-06-12T15:06:30.000Z
|
coap/coapTransmitter.py
|
TimothyClaeys/coap
|
02487f06980c5c434fcf7efc0f04a97a081e1f13
|
[
"BSD-3-Clause"
] | 57
|
2015-01-07T08:54:54.000Z
|
2021-09-27T18:39:55.000Z
|
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('coapTransmitter')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import time
import threading
import random
from . import coapDefines as d
from . import coapException as e
from . import coapUtils as u
from . import coapMessage as m
class coapTransmitter(threading.Thread):
'''
\brief A class which takes care of transmitting a CoAP message.
It handles:
- waiting for an app-level reply, and
    - waiting for a transport-level ACK in case of confirmable messages.
The thread is ephemeral: it is created for each transmission, and becomes
inactive when the transmission is completed, or times out.
'''
# states of the finite state machine this class implements
STATE_INIT = 'INIT'
STATE_TXCON = 'TXCON'
STATE_TXNON = 'TXNON'
STATE_WAITFORACK = 'WAITFORACK'
STATE_ACKRX = 'ACKRX'
STATE_WAITFOREXPIRATIONMID = 'WAITFOREXPIRATIONMID'
STATE_WAITFORRESP = 'WAITFORRESP'
STATE_RESPRX = 'RESPRX'
STATE_TXACK = 'TXACK'
STATE_ALL = [
STATE_INIT,
STATE_TXCON,
STATE_TXNON,
STATE_WAITFORACK,
STATE_WAITFOREXPIRATIONMID,
STATE_WAITFORRESP,
STATE_TXACK,
]
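    # Typical confirmable-request walk through the FSM (derived from the
    # action methods below; separate-response case shown):
    #
    #   INIT -> TXCON -> WAITFORACK -> ACKRX -> WAITFORRESP -> RESPRX -> TXACK
    #
    # Non-confirmable requests skip the ACK leg: INIT -> TXNON -> WAITFORRESP.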
def __init__(self,sendFunc,srcIp,srcPort,destIp,destPort,confirmable,messageId,code,token,options,payload,securityContext,requestSeq,ackTimeout,respTimeout,maxRetransmit):
'''
        \brief Initializer function.
This function initializes this instance by recording everything about
        the CoAP message to be exchanged with the remote endpoint. It does not,
however, initiate the exchange, which is done by calling the transmit()
method.
        \param[in] sendFunc The function to call to send a CoAP message.
\param[in] srcIp The IP address of the local endpoint, a string of the
form 'aaaa::1'.
        \param[in] srcPort The UDP port the local endpoint is attached to, an
integer between 0x0000 and 0xffff.
\param[in] destIp The IP address of the remote CoAP endpoint, a
string of the form 'aaaa::1'.
\param[in] destPort The UDP port the remote endpoint is attached to, an
integer between 0x0000 and 0xffff.
\param[in] confirmable A boolean indicating whether the CoAP request is
to be send confirmable (True) or non-confirmable (False).
\param[in] messageId The message ID to be used for the CoAP request, an
integer. The caller of this function needs to enforce unicity rules
for the value passed.
\param[in] code The CoAP method to used in the request. Needs to a
value of METHOD_ALL.
\param[in] token The token to be used for this exchange. The caller
of this function needs to enforce unicity rules for the value
passed.
\param[in] options A list of CoAP options. Each element needs to be
an instance of the coapOption class. Note that this class will add
            appropriate CoAP options to encode the URI and query, if needed.
\param[in] payload The payload to pass in the CoAP request. This needs
            to be a byte list, i.e. a list of integers between 0x00 and 0xff.
This function does not parse this payload, which is written as-is
in the CoAP request.
\param[in] securityContext Security context used for protection of the request
\param[in] requestSeq OSCORE's sequence number from the request.
\param[in] ackTimeout The ACK timeout.
\param[in] respTimeout The app-level response timeout.
'''
# log
log.debug('creating instance')
# store params
self.sendFunc = sendFunc
self.srcIp = srcIp
self.srcPort = srcPort
self.destIp = destIp
self.destPort = destPort
self.confirmable = confirmable
self.messageId = messageId
self.code = code
self.token = token
self.options = options
self.payload = payload
self.securityContext = securityContext
self.requestSeq = requestSeq
self.maxRetransmit = maxRetransmit
# local variables
self.dataLock = threading.Lock() # lock access to internal state
self.fsmSem = threading.Lock() # trigger an FSM iteration
self.startLock = threading.Lock() # released to start communicating
self.endLock = threading.Lock() # released when done communicating
self.stateLock = threading.RLock() # busy setting or getting FSM state
self.rxMsgEvent = threading.Event()
self.receivedACK = None
self.receivedResp = None
self.coapResponse = None
self.coapError = None
self.state = self.STATE_INIT # current state of the FSM
self.numTxCON = 0
self.ackTimeout = ackTimeout
self.respTimeout = respTimeout
self.fsmGoOn = True
self.fsmAction = {
self.STATE_INIT: self._action_INIT,
self.STATE_TXCON: self._action_TXCON,
self.STATE_TXNON: self._action_TXNON,
self.STATE_WAITFORACK: self._action_WAITFORACK,
self.STATE_ACKRX: self._action_ACKRX,
self.STATE_WAITFOREXPIRATIONMID: self._action_WAITFOREXPIRATIONMID,
self.STATE_WAITFORRESP: self._action_WAITFORRESP,
self.STATE_RESPRX: self._action_RESPRX,
self.STATE_TXACK: self._action_TXACK,
}
# initialize parent
threading.Thread.__init__(self)
# give this thread a name
self.name = '[{0}]:{1}--m0x{2:x},0x{3:x}-->[{4}]:{5}'.format(
self.srcIp,
self.srcPort,
self.messageId,
self.token,
self.destIp,
self.destPort,
)
# by default, I'm not communicating
self.startLock.acquire()
self.endLock.acquire()
# start the thread's execution
self.start()
#======================== public ==========================================
def transmit(self):
'''
\brief Start the interaction with the destination, including waiting
for transport-level ACK (if needed), waiting for an app-level
response, and ACKing that (if needed)
This function blocks until a response is received, or the interaction
times out.
\raise coapTimeout When either no ACK is received in time (for
confirmable requests), or no application-level response is received.
\return The received response, already parsed.
'''
# log
log.debug('transmit()')
# start the thread's execution
self.startLock.release()
# wait for it to be done
self.endLock.acquire()
# raise an exception if went wrong, or return response
with self.dataLock:
if self.coapError:
assert not self.coapResponse
raise self.coapError #pylint: disable=E0702
if self.coapResponse:
assert not self.coapError
return self.coapResponse
raise SystemError('neither an error, nor a response')
def getState(self):
with self.stateLock:
return self.state
def receiveMessage(self, timestamp, srcIp, srcPort, message):
assert srcIp==self.destIp
assert srcPort==self.destPort
assert (message['token']==self.token) or (message['messageId']==self.messageId)
# log
log.debug('receiveMessage timestamp={0} srcIp={1} srcPort={2} message={3}'.format(timestamp,srcIp,srcPort,message))
# turn message into exception if needed
if message['code'] not in d.METHOD_ALL+d.COAP_RC_ALL_SUCCESS:
message = e.coapRcFactory(message['code'])
# store packet
with self.dataLock:
self.LastRxPacket = (timestamp,srcIp,srcPort,message)
# signal reception
self.rxMsgEvent.set()
#======================= private ==========================================
#===== fsm
def run(self):
try:
# wait for transmit() to be called
self.startLock.acquire()
# log
log.debug('start FSM')
while self.fsmGoOn:
# wait for the FSM to be kicked
self.fsmSem.acquire()
# log
log.debug('fsm state iteration: {0}'.format(self.getState()))
# call the appropriate action
self.fsmAction[self.getState()]()
# is interaction done?
with self.dataLock:
if self.coapError or self.coapResponse:
self.endLock.release()
self.fsmGoOn=False
except Exception as err:
log.critical(u.formatCrashMessage(
threadName = self.name,
error = err
)
)
def _action_INIT(self):
# log
log.debug('_action_INIT()')
# set state according to confirmable
if self.confirmable:
self._setState(self.STATE_TXCON)
else:
self._setState(self.STATE_TXNON)
# kick FSM
self._kickFsm()
def _action_TXCON(self):
# log
log.debug('_action_TXCON()')
# flag error if max number of CON transmits reached
if self.numTxCON>self.maxRetransmit+1:
# this is an error case
self.coapError = e.coapTimeout('No ACK received after {0} tries (max {1})'.format(
self.numTxCON,
self.maxRetransmit+1,
)
)
return
# build message
message = m.buildMessage(
msgtype = d.TYPE_CON,
token = self.token,
code = self.code,
messageId = self.messageId,
options = self.options,
payload = self.payload,
securityContext = self.securityContext,
partialIV = self.requestSeq,
)
# send
self.sendFunc(
destIp = self.destIp,
destPort = self.destPort,
msg = message,
)
# increment number of transmitted messages
self.numTxCON += 1
# update FSM state
self._setState(self.STATE_WAITFORACK)
# kick FSM
self._kickFsm()
def _action_TXNON(self):
# log
log.debug('_action_TXNON()')
# build message
message = m.buildMessage(
msgtype = d.TYPE_NON,
token = self.token,
code = self.code,
messageId = self.messageId,
options = self.options,
payload = self.payload,
securityContext = self.securityContext,
partialIV = self.requestSeq,
)
# send
self.sendFunc(
destIp = self.destIp,
destPort = self.destPort,
msg = message,
)
# update FSM state
self._setState(self.STATE_WAITFORRESP)
# kick FSM
self._kickFsm()
def _action_WAITFORACK(self):
# log
log.debug('_action_WAITFORACK()')
startTime = time.time()
ackMaxWait = self.ackTimeout*random.uniform(1, d.DFLT_ACK_RANDOM_FACTOR)
while True:
waitTimeLeft = startTime+ackMaxWait-time.time()
if self.rxMsgEvent.wait(timeout=waitTimeLeft):
# I got message
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.LastRxPacket
if isinstance(message,e.coapRc):
with self.dataLock:
self.coapError = message
return
elif (
message['type']==d.TYPE_ACK and
message['messageId']==self.messageId
):
# store ACK
with self.dataLock:
self.receivedACK = (timestamp,srcIp,srcPort,message)
# update FSM state
self._setState(self.STATE_ACKRX)
# kick FSM
self._kickFsm()
return
else:
# re-send
# update FSM state
self._setState(self.STATE_TXCON)
# kick FSM
self._kickFsm()
return
def _action_ACKRX(self):
# log
log.debug('_action_ACKRX()')
with self.dataLock:
assert self.receivedACK
(timestamp,srcIp,srcPort,message) = self.receivedACK
if message['code']==d.COAP_RC_NONE:
# response NOT piggybacked
# update FSM state
self._setState(self.STATE_WAITFORRESP)
# kick FSM
self._kickFsm()
else:
# piggybacked response
# successful end of FSM
with self.dataLock:
self.coapResponse = message
def _action_WAITFOREXPIRATIONMID(self):
# log
log.debug('_action_WAITFOREXPIRATIONMID()')
raise NotImplementedError()
def _action_WAITFORRESP(self):
# log
log.debug('_action_WAITFORRESP()')
startTime = time.time()
while True:
waitTimeLeft = startTime+self.respTimeout-time.time()
if self.rxMsgEvent.wait(timeout=waitTimeLeft):
# I got message
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.LastRxPacket
if isinstance(message,e.coapRc):
with self.dataLock:
self.coapError = message
return
elif (
(
message['type']==d.TYPE_CON or
message['type']==d.TYPE_NON
) and
message['token']==self.token
):
# store response
with self.dataLock:
self.receivedResp = (timestamp,srcIp,srcPort,message)
# update FSM state
self._setState(self.STATE_RESPRX)
# kick FSM
self._kickFsm()
return
else:
# this is an error case
self.coapError = e.coapTimeout('No Response received after {0}s'.format(
self.respTimeout,
)
)
return
def _action_RESPRX(self):
# log
log.debug('_action_RESPRX()')
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.receivedResp
# decide whether to ACK response
if message['type']==d.TYPE_CON:
self._setState(self.STATE_TXACK)
elif message['type']==d.TYPE_NON:
# successful end of FSM
with self.dataLock:
self.coapResponse = message
else:
raise SystemError('unexpected message type {0}'.format(message['type']))
# kick FSM
self._kickFsm()
    def _action_TXACK(self):
        # log
        log.debug('_action_TXACK()')
        with self.dataLock:
            (timestamp,srcIp,srcPort,message) = self.receivedResp
        # build ACK (kept in its own variable so the received response in
        # `message` can still be returned below)
        ackMessage = m.buildMessage(
            msgtype          = d.TYPE_ACK,
            token            = None,
            code             = d.COAP_RC_NONE,
            messageId        = message['messageId'],
        )
        # send the ACK back to the source of the response
        self.sendFunc(
            destIp   = srcIp,
            destPort = srcPort,
            msg      = ackMessage,
        )
        # successful end of FSM: store the received response, not the ACK
        with self.dataLock:
            self.coapResponse = message
        # kick FSM
        self._kickFsm()
#===== helpers
def _kickFsm(self):
self.fsmSem.release()
def _setState(self,newState):
with self.stateLock:
self.state = newState
log.debug('{0}: state={1}'.format(self.name,newState))
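# A hedged usage sketch of the transmitter (the send function, addresses, and
# ids are placeholders; constants like d.METHOD_GET are assumed to come from
# coapDefines, and a real caller would supply its own UDP send routine plus
# unicity-checked messageId/token values):
#
#   transmitter = coapTransmitter(
#       sendFunc        = my_udp_send,      # callable(destIp, destPort, msg)
#       srcIp           = 'aaaa::1',
#       srcPort         = 61618,
#       destIp          = 'aaaa::2',
#       destPort        = 5683,
#       confirmable     = True,
#       messageId       = 0x1234,
#       code            = d.METHOD_GET,
#       token           = 0x42,
#       options         = [],
#       payload         = [],
#       securityContext = None,
#       requestSeq      = None,
#       ackTimeout      = 2,
#       respTimeout     = 10,
#       maxRetransmit   = 4,
#   )
#   response = transmitter.transmit()  # blocks; raises coapTimeout on failure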
| 34.48062
| 176
| 0.519054
|
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('coapTransmitter')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import time
import threading
import random
from . import coapDefines as d
from . import coapException as e
from . import coapUtils as u
from . import coapMessage as m
class coapTransmitter(threading.Thread):
STATE_INIT = 'INIT'
STATE_TXCON = 'TXCON'
STATE_TXNON = 'TXNON'
STATE_WAITFORACK = 'WAITFORACK'
STATE_ACKRX = 'ACKRX'
STATE_WAITFOREXPIRATIONMID = 'WAITFOREXPIRATIONMID'
STATE_WAITFORRESP = 'WAITFORRESP'
STATE_RESPRX = 'RESPRX'
STATE_TXACK = 'TXACK'
STATE_ALL = [
STATE_INIT,
STATE_TXCON,
STATE_TXNON,
STATE_WAITFORACK,
STATE_WAITFOREXPIRATIONMID,
STATE_WAITFORRESP,
STATE_TXACK,
]
def __init__(self,sendFunc,srcIp,srcPort,destIp,destPort,confirmable,messageId,code,token,options,payload,securityContext,requestSeq,ackTimeout,respTimeout,maxRetransmit):
log.debug('creating instance')
self.sendFunc = sendFunc
self.srcIp = srcIp
self.srcPort = srcPort
self.destIp = destIp
self.destPort = destPort
self.confirmable = confirmable
self.messageId = messageId
self.code = code
self.token = token
self.options = options
self.payload = payload
self.securityContext = securityContext
self.requestSeq = requestSeq
self.maxRetransmit = maxRetransmit
self.dataLock = threading.Lock()
self.fsmSem = threading.Lock()
self.startLock = threading.Lock()
self.endLock = threading.Lock()
self.stateLock = threading.RLock()
self.rxMsgEvent = threading.Event()
self.receivedACK = None
self.receivedResp = None
self.coapResponse = None
self.coapError = None
self.state = self.STATE_INIT
self.numTxCON = 0
self.ackTimeout = ackTimeout
self.respTimeout = respTimeout
self.fsmGoOn = True
self.fsmAction = {
self.STATE_INIT: self._action_INIT,
self.STATE_TXCON: self._action_TXCON,
self.STATE_TXNON: self._action_TXNON,
self.STATE_WAITFORACK: self._action_WAITFORACK,
self.STATE_ACKRX: self._action_ACKRX,
self.STATE_WAITFOREXPIRATIONMID: self._action_WAITFOREXPIRATIONMID,
self.STATE_WAITFORRESP: self._action_WAITFORRESP,
self.STATE_RESPRX: self._action_RESPRX,
self.STATE_TXACK: self._action_TXACK,
}
threading.Thread.__init__(self)
self.name = '[{0}]:{1}--m0x{2:x},0x{3:x}-->[{4}]:{5}'.format(
self.srcIp,
self.srcPort,
self.messageId,
self.token,
self.destIp,
self.destPort,
)
self.startLock.acquire()
self.endLock.acquire()
# start the thread's execution
self.start()
def transmit(self):
log.debug('transmit()')
self.startLock.release()
# wait for it to be done
self.endLock.acquire()
# raise an exception if went wrong, or return response
with self.dataLock:
if self.coapError:
assert not self.coapResponse
raise self.coapError #pylint: disable=E0702
if self.coapResponse:
assert not self.coapError
return self.coapResponse
raise SystemError('neither an error, nor a response')
def getState(self):
with self.stateLock:
return self.state
def receiveMessage(self, timestamp, srcIp, srcPort, message):
assert srcIp==self.destIp
assert srcPort==self.destPort
assert (message['token']==self.token) or (message['messageId']==self.messageId)
# log
log.debug('receiveMessage timestamp={0} srcIp={1} srcPort={2} message={3}'.format(timestamp,srcIp,srcPort,message))
# turn message into exception if needed
if message['code'] not in d.METHOD_ALL+d.COAP_RC_ALL_SUCCESS:
message = e.coapRcFactory(message['code'])
# store packet
with self.dataLock:
self.LastRxPacket = (timestamp,srcIp,srcPort,message)
# signal reception
self.rxMsgEvent.set()
#======================= private ==========================================
#===== fsm
def run(self):
try:
# wait for transmit() to be called
self.startLock.acquire()
# log
log.debug('start FSM')
while self.fsmGoOn:
# wait for the FSM to be kicked
self.fsmSem.acquire()
# log
log.debug('fsm state iteration: {0}'.format(self.getState()))
# call the appropriate action
self.fsmAction[self.getState()]()
# is interaction done?
with self.dataLock:
if self.coapError or self.coapResponse:
self.endLock.release()
self.fsmGoOn=False
except Exception as err:
log.critical(u.formatCrashMessage(
threadName = self.name,
error = err
)
)
def _action_INIT(self):
# log
log.debug('_action_INIT()')
# set state according to confirmable
if self.confirmable:
self._setState(self.STATE_TXCON)
else:
self._setState(self.STATE_TXNON)
# kick FSM
self._kickFsm()
def _action_TXCON(self):
# log
log.debug('_action_TXCON()')
# flag error if max number of CON transmits reached
if self.numTxCON>self.maxRetransmit+1:
# this is an error case
self.coapError = e.coapTimeout('No ACK received after {0} tries (max {1})'.format(
self.numTxCON,
self.maxRetransmit+1,
)
)
return
# build message
message = m.buildMessage(
msgtype = d.TYPE_CON,
token = self.token,
code = self.code,
messageId = self.messageId,
options = self.options,
payload = self.payload,
securityContext = self.securityContext,
partialIV = self.requestSeq,
)
# send
self.sendFunc(
destIp = self.destIp,
destPort = self.destPort,
msg = message,
)
# increment number of transmitted messages
self.numTxCON += 1
# update FSM state
self._setState(self.STATE_WAITFORACK)
# kick FSM
self._kickFsm()
def _action_TXNON(self):
# log
log.debug('_action_TXNON()')
# build message
message = m.buildMessage(
msgtype = d.TYPE_NON,
token = self.token,
code = self.code,
messageId = self.messageId,
options = self.options,
payload = self.payload,
securityContext = self.securityContext,
partialIV = self.requestSeq,
)
# send
self.sendFunc(
destIp = self.destIp,
destPort = self.destPort,
msg = message,
)
# update FSM state
self._setState(self.STATE_WAITFORRESP)
# kick FSM
self._kickFsm()
def _action_WAITFORACK(self):
# log
log.debug('_action_WAITFORACK()')
startTime = time.time()
ackMaxWait = self.ackTimeout*random.uniform(1, d.DFLT_ACK_RANDOM_FACTOR)
while True:
waitTimeLeft = startTime+ackMaxWait-time.time()
if self.rxMsgEvent.wait(timeout=waitTimeLeft):
# I got message
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.LastRxPacket
if isinstance(message,e.coapRc):
with self.dataLock:
self.coapError = message
return
elif (
message['type']==d.TYPE_ACK and
message['messageId']==self.messageId
):
# store ACK
with self.dataLock:
self.receivedACK = (timestamp,srcIp,srcPort,message)
# update FSM state
self._setState(self.STATE_ACKRX)
# kick FSM
self._kickFsm()
return
else:
# re-send
# update FSM state
self._setState(self.STATE_TXCON)
# kick FSM
self._kickFsm()
return
def _action_ACKRX(self):
# log
log.debug('_action_ACKRX()')
with self.dataLock:
assert self.receivedACK
(timestamp,srcIp,srcPort,message) = self.receivedACK
if message['code']==d.COAP_RC_NONE:
# response NOT piggybacked
# update FSM state
self._setState(self.STATE_WAITFORRESP)
# kick FSM
self._kickFsm()
else:
# piggybacked response
# successful end of FSM
with self.dataLock:
self.coapResponse = message
def _action_WAITFOREXPIRATIONMID(self):
# log
log.debug('_action_WAITFOREXPIRATIONMID()')
raise NotImplementedError()
def _action_WAITFORRESP(self):
# log
log.debug('_action_WAITFORRESP()')
startTime = time.time()
while True:
waitTimeLeft = startTime+self.respTimeout-time.time()
if self.rxMsgEvent.wait(timeout=waitTimeLeft):
# I got message
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.LastRxPacket
if isinstance(message,e.coapRc):
with self.dataLock:
self.coapError = message
return
elif (
(
message['type']==d.TYPE_CON or
message['type']==d.TYPE_NON
) and
message['token']==self.token
):
# store response
with self.dataLock:
self.receivedResp = (timestamp,srcIp,srcPort,message)
# update FSM state
self._setState(self.STATE_RESPRX)
# kick FSM
self._kickFsm()
return
else:
# this is an error case
self.coapError = e.coapTimeout('No Response received after {0}s'.format(
self.respTimeout,
)
)
return
def _action_RESPRX(self):
# log
log.debug('_action_RESPRX()')
with self.dataLock:
(timestamp,srcIp,srcPort,message) = self.receivedResp
# decide whether to ACK response
if message['type']==d.TYPE_CON:
self._setState(self.STATE_TXACK)
elif message['type']==d.TYPE_NON:
# successful end of FSM
with self.dataLock:
self.coapResponse = message
else:
raise SystemError('unexpected message type {0}'.format(message['type']))
# kick FSM
self._kickFsm()
    def _action_TXACK(self):
        # log
        log.debug('_action_TXACK()')
        with self.dataLock:
            (timestamp,srcIp,srcPort,message) = self.receivedResp
        # build ACK (kept in its own variable so the received response in
        # `message` can still be returned below)
        ackMessage = m.buildMessage(
            msgtype          = d.TYPE_ACK,
            token            = None,
            code             = d.COAP_RC_NONE,
            messageId        = message['messageId'],
        )
        # send the ACK back to the source of the response
        self.sendFunc(
            destIp   = srcIp,
            destPort = srcPort,
            msg      = ackMessage,
        )
        # successful end of FSM: store the received response, not the ACK
        with self.dataLock:
            self.coapResponse = message
        # kick FSM
        self._kickFsm()
#===== helpers
def _kickFsm(self):
self.fsmSem.release()
def _setState(self,newState):
with self.stateLock:
self.state = newState
log.debug('{0}: state={1}'.format(self.name,newState))
| true
| true
|
1c49d5f237a286b2bb5c13f14cb28491327c2343
| 3,576
|
py
|
Python
|
funboost/factories/publisher_factotry.py
|
DJMIN/funboost
|
7570ca2909bb0b44a1080f5f98aa96c86d3da9d4
|
[
"Apache-2.0"
] | null | null | null |
funboost/factories/publisher_factotry.py
|
DJMIN/funboost
|
7570ca2909bb0b44a1080f5f98aa96c86d3da9d4
|
[
"Apache-2.0"
] | null | null | null |
funboost/factories/publisher_factotry.py
|
DJMIN/funboost
|
7570ca2909bb0b44a1080f5f98aa96c86d3da9d4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 13:16
import copy
from typing import Callable
from funboost.publishers.confluent_kafka_publisher import ConfluentKafkaPublisher
from funboost.publishers.http_publisher import HTTPPublisher
from funboost.publishers.kombu_publisher import KombuPublisher
from funboost.publishers.nats_publisher import NatsPublisher
from funboost.publishers.redis_publisher_lpush import RedisPublisherLpush
from funboost.publishers.tcp_publisher import TCPPublisher
from funboost.publishers.txt_file_publisher import TxtFilePublisher
from funboost.publishers.udp_publisher import UDPPublisher
from funboost.publishers.zeromq_publisher import ZeroMqPublisher
from funboost.publishers.kafka_publisher import KafkaPublisher
from funboost.publishers.local_python_queue_publisher import LocalPythonQueuePublisher
from funboost.publishers.mongomq_publisher import MongoMqPublisher
from funboost.publishers.nsq_publisher import NsqPublisher
from funboost.publishers.persist_queue_publisher import PersistQueuePublisher
from funboost.publishers.rabbitmq_amqpstorm_publisher import RabbitmqPublisherUsingAmqpStorm
from funboost.publishers.rabbitmq_pika_publisher import RabbitmqPublisher
from funboost.publishers.rabbitmq_rabbitpy_publisher import RabbitmqPublisherUsingRabbitpy
from funboost.publishers.redis_publisher import RedisPublisher
from funboost.publishers.rocketmq_publisher import RocketmqPublisher
from funboost.publishers.sqla_queue_publisher import SqlachemyQueuePublisher
from funboost.publishers.redis_stream_publisher import RedisStreamPublisher
from funboost.publishers.mqtt_publisher import MqttPublisher
from funboost.publishers.httpsqs_publisher import HttpsqsPublisher
from funboost import funboost_config_deafult
def get_publisher(queue_name, *, log_level_int=10, logger_prefix='', is_add_file_handler=True,
clear_queue_within_init=False, is_add_publish_time=True, consuming_function: Callable = None,
broker_kind: int = None):
"""
:param queue_name:
:param log_level_int:
:param logger_prefix:
:param is_add_file_handler:
:param clear_queue_within_init:
    :param is_add_publish_time: whether to add the publish time; this flag
           will be deprecated and the time will always be added.
    :param consuming_function: the consuming function, used to validate a
           task's arguments at publish time. If not passed, published tasks
           are not validated. For example, if an add function accepts x and y,
           pushing {"x": 1, "z": 3} is invalid because the function does not
           accept a z parameter.
    :param broker_kind: the kind of broker middleware or client package to use.
:return:
"""
all_kwargs = copy.deepcopy(locals())
all_kwargs.pop('broker_kind')
broker_kind__publisher_type_map = {
0: RabbitmqPublisherUsingAmqpStorm,
1: RabbitmqPublisherUsingRabbitpy,
2: RedisPublisher,
3: LocalPythonQueuePublisher,
4: RabbitmqPublisher,
5: MongoMqPublisher,
6: PersistQueuePublisher,
7: NsqPublisher,
8: KafkaPublisher,
9: RedisPublisher,
10: SqlachemyQueuePublisher,
11: RocketmqPublisher,
12: RedisStreamPublisher,
13: ZeroMqPublisher,
14: RedisPublisherLpush,
15: KombuPublisher,
16: ConfluentKafkaPublisher,
17: MqttPublisher,
18: HttpsqsPublisher,
21: UDPPublisher,
22: TCPPublisher,
23: HTTPPublisher,
24: NatsPublisher,
25: TxtFilePublisher,
}
if broker_kind is None:
broker_kind = funboost_config_deafult.DEFAULT_BROKER_KIND
if broker_kind not in broker_kind__publisher_type_map:
        raise ValueError(f'Invalid broker kind number; you set {broker_kind}')
return broker_kind__publisher_type_map[broker_kind](**all_kwargs)
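# Minimal usage sketch (illustrative, not part of the module): the queue name and
# payload are assumptions, and broker_kind=2 selects RedisPublisher per the map above.
# Funboost publishers are assumed to expose a publish() method taking a dict.
#
# publisher = get_publisher('test_queue', broker_kind=2)
# publisher.publish({'x': 1, 'y': 2})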
| 43.609756
| 111
| 0.782438
|
import copy
from typing import Callable
from funboost.publishers.confluent_kafka_publisher import ConfluentKafkaPublisher
from funboost.publishers.http_publisher import HTTPPublisher
from funboost.publishers.kombu_publisher import KombuPublisher
from funboost.publishers.nats_publisher import NatsPublisher
from funboost.publishers.redis_publisher_lpush import RedisPublisherLpush
from funboost.publishers.tcp_publisher import TCPPublisher
from funboost.publishers.txt_file_publisher import TxtFilePublisher
from funboost.publishers.udp_publisher import UDPPublisher
from funboost.publishers.zeromq_publisher import ZeroMqPublisher
from funboost.publishers.kafka_publisher import KafkaPublisher
from funboost.publishers.local_python_queue_publisher import LocalPythonQueuePublisher
from funboost.publishers.mongomq_publisher import MongoMqPublisher
from funboost.publishers.nsq_publisher import NsqPublisher
from funboost.publishers.persist_queue_publisher import PersistQueuePublisher
from funboost.publishers.rabbitmq_amqpstorm_publisher import RabbitmqPublisherUsingAmqpStorm
from funboost.publishers.rabbitmq_pika_publisher import RabbitmqPublisher
from funboost.publishers.rabbitmq_rabbitpy_publisher import RabbitmqPublisherUsingRabbitpy
from funboost.publishers.redis_publisher import RedisPublisher
from funboost.publishers.rocketmq_publisher import RocketmqPublisher
from funboost.publishers.sqla_queue_publisher import SqlachemyQueuePublisher
from funboost.publishers.redis_stream_publisher import RedisStreamPublisher
from funboost.publishers.mqtt_publisher import MqttPublisher
from funboost.publishers.httpsqs_publisher import HttpsqsPublisher
from funboost import funboost_config_deafult
def get_publisher(queue_name, *, log_level_int=10, logger_prefix='', is_add_file_handler=True,
clear_queue_within_init=False, is_add_publish_time=True, consuming_function: Callable = None,
broker_kind: int = None):
all_kwargs = copy.deepcopy(locals())
all_kwargs.pop('broker_kind')
broker_kind__publisher_type_map = {
0: RabbitmqPublisherUsingAmqpStorm,
1: RabbitmqPublisherUsingRabbitpy,
2: RedisPublisher,
3: LocalPythonQueuePublisher,
4: RabbitmqPublisher,
5: MongoMqPublisher,
6: PersistQueuePublisher,
7: NsqPublisher,
8: KafkaPublisher,
9: RedisPublisher,
10: SqlachemyQueuePublisher,
11: RocketmqPublisher,
12: RedisStreamPublisher,
13: ZeroMqPublisher,
14: RedisPublisherLpush,
15: KombuPublisher,
16: ConfluentKafkaPublisher,
17: MqttPublisher,
18: HttpsqsPublisher,
21: UDPPublisher,
22: TCPPublisher,
23: HTTPPublisher,
24: NatsPublisher,
25: TxtFilePublisher,
}
if broker_kind is None:
broker_kind = funboost_config_deafult.DEFAULT_BROKER_KIND
if broker_kind not in broker_kind__publisher_type_map:
        raise ValueError(f'Invalid broker kind number; you set {broker_kind}')
return broker_kind__publisher_type_map[broker_kind](**all_kwargs)
| true
| true
|
1c49d63275e5e8dd3611c5fba177eb143551df99
| 15,890
|
py
|
Python
|
docs/source/conf.py
|
crusaderky/distributed
|
d1cf1d452aece30b75adaf7f73f7cfdc69a63c4a
|
[
"BSD-3-Clause"
] | 1,358
|
2016-02-09T21:25:27.000Z
|
2022-03-30T08:06:36.000Z
|
docs/source/conf.py
|
crusaderky/distributed
|
d1cf1d452aece30b75adaf7f73f7cfdc69a63c4a
|
[
"BSD-3-Clause"
] | 4,789
|
2016-02-10T00:13:43.000Z
|
2022-03-31T23:56:27.000Z
|
docs/source/conf.py
|
crusaderky/distributed
|
d1cf1d452aece30b75adaf7f73f7cfdc69a63c4a
|
[
"BSD-3-Clause"
] | 791
|
2016-02-19T04:34:38.000Z
|
2022-03-31T16:26:38.000Z
|
from __future__ import annotations
#
# Dask.distributed documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 6 14:42:44 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.autosummary",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"numpydoc",
"sphinx_click.ext",
]
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Dask.distributed"
copyright = "2016, Anaconda, Inc."
author = "Anaconda, Inc."
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import distributed
version = distributed.__version__
# The full version, including alpha/beta/rc tags.
release = distributed.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: list[str] = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
html_theme = "dask_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path: list[str] = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "distributeddoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements: dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"distributed.tex",
"Dask.distributed Documentation",
"Matthew Rocklin",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "Dask.distributed", "Dask.distributed Documentation", [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Dask.distributed",
"Dask.distributed Documentation",
author,
"Dask.distributed",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# Link to GitHub issues and pull requests using :pr:`1234` and :issue:`1234`
# syntax
extlinks = {
"issue": ("https://github.com/dask/distributed/issues/%s", "GH#"),
"pr": ("https://github.com/dask/distributed/pull/%s", "GH#"),
}
# Configuration for intersphinx: refer to the Python standard library
# and the Numpy documentation.
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"numpy": ("https://docs.scipy.org/doc/numpy", None),
"pandas": ("https://pandas.pydata.org/docs", None),
"dask": ("https://docs.dask.org/en/latest", None),
"bokeh": ("https://docs.bokeh.org/en/latest", None),
}
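# With this mapping, cross-references such as :class:`pandas.DataFrame` or
# :func:`dask.delayed` in these docs resolve to the external projects' pages.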
# Redirects
# https://tech.signavio.com/2017/managing-sphinx-redirects
redirect_files = [
# old html, new html
("joblib.html", "https://ml.dask.org/joblib.html"),
("setup.html", "https://docs.dask.org/en/latest/setup.html"),
("ec2.html", "https://docs.dask.org/en/latest/setup/cloud.html"),
("configuration.html", "https://docs.dask.org/en/latest/configuration.html"),
(
"local-cluster.html",
"https://docs.dask.org/en/latest/setup/single-distributed.html",
),
("adaptive.html", "https://docs.dask.org/en/latest/setup/adaptive.html"),
("prometheus.html", "https://docs.dask.org/en/latest/setup/prometheus.html"),
("web.html", "https://docs.dask.org/en/latest/diagnostics-distributed.html"),
]
redirect_template = """\
<html>
<head>
<meta http-equiv="refresh" content="1; url={new}" />
<script>
window.location.href = "{new}"
</script>
</head>
</html>
"""
def copy_legacy_redirects(app, docname):
if app.builder.name == "html":
for html_src_path, new in redirect_files:
page = redirect_template.format(new=new)
target_path = app.outdir + "/" + html_src_path
with open(target_path, "w") as f:
f.write(page)
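# Wired up in setup() below via app.connect("build-finished", copy_legacy_redirects),
# so the redirect stubs are written only after the HTML build completes.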
from docutils.parsers.rst import directives # type: ignore
# -- Configuration to keep autosummary in sync with autoclass::members ----------------------------------------------
# Fixes issues/3693
# See https://stackoverflow.com/questions/20569011/python-sphinx-autosummary-automated-listing-of-member-functions
from sphinx.ext.autosummary import Autosummary, get_documenter
from sphinx.util.inspect import safe_getattr
class AutoAutoSummary(Autosummary):
"""Create a summary for methods and attributes (autosummary).
See https://stackoverflow.com/questions/20569011/python-sphinx-autosummary-automated-listing-of-member-functions
"""
option_spec = {
"methods": directives.unchanged,
"attributes": directives.unchanged,
}
required_arguments = 1
@staticmethod
def get_members(app, obj, typ, include_public=None):
if not include_public:
include_public = []
items = []
for name in sorted(obj.__dict__.keys()):
try:
documenter = get_documenter(app, safe_getattr(obj, name), obj)
except AttributeError:
continue
if documenter.objtype in typ:
items.append(name)
public = [x for x in items if x in include_public or not x.startswith("_")]
return public, items
def run(self):
clazz = str(self.arguments[0])
(module_name, class_name) = clazz.rsplit(".", 1)
m = __import__(module_name, globals(), locals(), [class_name])
c = getattr(m, class_name)
app = self.state.document.settings.env.app
if "methods" in self.options:
_, methods = self.get_members(app, c, ["method"], ["__init__"])
self.content = [
f"{class_name}.{method}"
for method in methods
if not method.startswith("_")
]
if "attributes" in self.options:
_, attribs = self.get_members(app, c, ["attribute", "property"])
self.content = [
f"~{clazz}.{attrib}" for attrib in attribs if not attrib.startswith("_")
]
return super().run()
def setup(app):
app.add_directive("autoautosummary", AutoAutoSummary)
app.connect("build-finished", copy_legacy_redirects)
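# Usage sketch for the directive registered above (the class path is illustrative):
#
#   .. autoautosummary:: distributed.Client
#      :methods:
#
# renders an autosummary listing of the public methods of distributed.Client.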
| 32.428571
| 117
| 0.685903
|
from __future__ import annotations
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.autosummary",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"numpydoc",
"sphinx_click.ext",
]
numpydoc_show_class_members = False
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "Dask.distributed"
copyright = "2016, Anaconda, Inc."
author = "Anaconda, Inc."
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import distributed
version = distributed.__version__
# The full version, including alpha/beta/rc tags.
release = distributed.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: list[str] = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
html_theme = "dask_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path: list[str] = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "distributeddoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements: dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"distributed.tex",
"Dask.distributed Documentation",
"Matthew Rocklin",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "Dask.distributed", "Dask.distributed Documentation", [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Dask.distributed",
"Dask.distributed Documentation",
author,
"Dask.distributed",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_exclude_files = ["search.html"]
extlinks = {
"issue": ("https://github.com/dask/distributed/issues/%s", "GH#"),
"pr": ("https://github.com/dask/distributed/pull/%s", "GH#"),
}
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"numpy": ("https://docs.scipy.org/doc/numpy", None),
"pandas": ("https://pandas.pydata.org/docs", None),
"dask": ("https://docs.dask.org/en/latest", None),
"bokeh": ("https://docs.bokeh.org/en/latest", None),
}
redirect_files = [
("joblib.html", "https://ml.dask.org/joblib.html"),
("setup.html", "https://docs.dask.org/en/latest/setup.html"),
("ec2.html", "https://docs.dask.org/en/latest/setup/cloud.html"),
("configuration.html", "https://docs.dask.org/en/latest/configuration.html"),
(
"local-cluster.html",
"https://docs.dask.org/en/latest/setup/single-distributed.html",
),
("adaptive.html", "https://docs.dask.org/en/latest/setup/adaptive.html"),
("prometheus.html", "https://docs.dask.org/en/latest/setup/prometheus.html"),
("web.html", "https://docs.dask.org/en/latest/diagnostics-distributed.html"),
]
redirect_template = """\
<html>
<head>
<meta http-equiv="refresh" content="1; url={new}" />
<script>
window.location.href = "{new}"
</script>
</head>
</html>
"""
def copy_legacy_redirects(app, docname):
if app.builder.name == "html":
for html_src_path, new in redirect_files:
page = redirect_template.format(new=new)
target_path = app.outdir + "/" + html_src_path
with open(target_path, "w") as f:
f.write(page)
from docutils.parsers.rst import directives
from sphinx.ext.autosummary import Autosummary, get_documenter
from sphinx.util.inspect import safe_getattr
class AutoAutoSummary(Autosummary):
option_spec = {
"methods": directives.unchanged,
"attributes": directives.unchanged,
}
required_arguments = 1
@staticmethod
def get_members(app, obj, typ, include_public=None):
if not include_public:
include_public = []
items = []
for name in sorted(obj.__dict__.keys()):
try:
documenter = get_documenter(app, safe_getattr(obj, name), obj)
except AttributeError:
continue
if documenter.objtype in typ:
items.append(name)
public = [x for x in items if x in include_public or not x.startswith("_")]
return public, items
def run(self):
clazz = str(self.arguments[0])
(module_name, class_name) = clazz.rsplit(".", 1)
m = __import__(module_name, globals(), locals(), [class_name])
c = getattr(m, class_name)
app = self.state.document.settings.env.app
if "methods" in self.options:
_, methods = self.get_members(app, c, ["method"], ["__init__"])
self.content = [
f"{class_name}.{method}"
for method in methods
if not method.startswith("_")
]
if "attributes" in self.options:
_, attribs = self.get_members(app, c, ["attribute", "property"])
self.content = [
f"~{clazz}.{attrib}" for attrib in attribs if not attrib.startswith("_")
]
return super().run()
def setup(app):
app.add_directive("autoautosummary", AutoAutoSummary)
app.connect("build-finished", copy_legacy_redirects)
| true
| true
|
1c49d6af950d52fdab2916b0e3bb2e50b50c6d22
| 6,893
|
py
|
Python
|
python/paddle/incubate/operators/graph_reindex.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
python/paddle/incubate/operators/graph_reindex.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/incubate/operators/graph_reindex.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-12-09T08:59:17.000Z
|
2021-12-09T08:59:17.000Z
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import _non_static_mode
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid import core
from paddle import _C_ops
def graph_reindex(x,
neighbors,
count,
value_buffer=None,
index_buffer=None,
flag_buffer_hashtable=False,
name=None):
"""
Graph Reindex API.
This API is mainly used in Graph Learning domain, which should be used
in conjunction with `graph_sample_neighbors` API. And the main purpose
is to reindex the ids information of the input nodes, and return the
corresponding graph edges after reindex.
**Notes**:
        The values in x should be unique, otherwise it would cause potential errors.
    Besides, we also support reindexing neighbors with multiple edge types. If x has
    neighbors of different edge types, we should concatenate all the neighbors and counts of x.
We will reindex all the nodes from 0.
Take input nodes x = [0, 1, 2] as an example.
If we have neighbors = [8, 9, 0, 4, 7, 6, 7], and count = [2, 3, 2],
then we know that the neighbors of 0 is [8, 9], the neighbors of 1
is [0, 4, 7], and the neighbors of 2 is [6, 7].
Args:
x (Tensor): The input nodes which we sample neighbors for. The available
data type is int32, int64.
neighbors (Tensor): The neighbors of the input nodes `x`. The data type
should be the same with `x`.
count (Tensor): The neighbor count of the input nodes `x`. And the
data type should be int32.
value_buffer (Tensor|None): Value buffer for hashtable. The data type should
be int32, and should be filled with -1.
index_buffer (Tensor|None): Index buffer for hashtable. The data type should
be int32, and should be filled with -1.
flag_buffer_hashtable (bool): Whether to use buffer for hashtable to speed up.
Default is False. Only useful for gpu version currently.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
reindex_src (Tensor): The source node index of graph edges after reindex.
reindex_dst (Tensor): The destination node index of graph edges after reindex.
out_nodes (Tensor): The index of unique input nodes and neighbors before reindex,
where we put the input nodes `x` in the front, and put neighbor
nodes in the back.
Examples:
.. code-block:: python
import paddle
x = [0, 1, 2]
neighbors_e1 = [8, 9, 0, 4, 7, 6, 7]
count_e1 = [2, 3, 2]
x = paddle.to_tensor(x, dtype="int64")
neighbors_e1 = paddle.to_tensor(neighbors_e1, dtype="int64")
count_e1 = paddle.to_tensor(count_e1, dtype="int32")
reindex_src, reindex_dst, out_nodes = \
paddle.incubate.graph_reindex(x, neighbors_e1, count_e1)
# reindex_src: [3, 4, 0, 5, 6, 7, 6]
# reindex_dst: [0, 0, 1, 1, 1, 2, 2]
# out_nodes: [0, 1, 2, 8, 9, 4, 7, 6]
neighbors_e2 = [0, 2, 3, 5, 1]
count_e2 = [1, 3, 1]
neighbors_e2 = paddle.to_tensor(neighbors_e2, dtype="int64")
count_e2 = paddle.to_tensor(count_e2, dtype="int32")
neighbors = paddle.concat([neighbors_e1, neighbors_e2])
count = paddle.concat([count_e1, count_e2])
reindex_src, reindex_dst, out_nodes = \
paddle.incubate.graph_reindex(x, neighbors, count)
# reindex_src: [3, 4, 0, 5, 6, 7, 6, 0, 2, 8, 9, 1]
# reindex_dst: [0, 0, 1, 1, 1, 2, 2, 0, 1, 1, 1, 2]
# out_nodes: [0, 1, 2, 8, 9, 4, 7, 6, 3, 5]
"""
if flag_buffer_hashtable:
if value_buffer is None or index_buffer is None:
raise ValueError(f"`value_buffer` and `index_buffer` should not"
"be None if `flag_buffer_hashtable` is True.")
if _non_static_mode():
reindex_src, reindex_dst, out_nodes = \
_C_ops.graph_reindex(x, neighbors, count, value_buffer, index_buffer,
"flag_buffer_hashtable", flag_buffer_hashtable)
return reindex_src, reindex_dst, out_nodes
check_variable_and_dtype(x, "X", ("int32", "int64"), "graph_reindex")
check_variable_and_dtype(neighbors, "Neighbors", ("int32", "int64"),
"graph_reindex")
check_variable_and_dtype(count, "Count", ("int32"), "graph_reindex")
if flag_buffer_hashtable:
check_variable_and_dtype(value_buffer, "HashTable_Value", ("int32"),
"graph_reindex")
check_variable_and_dtype(index_buffer, "HashTable_Value", ("int32"),
"graph_reindex")
helper = LayerHelper("graph_reindex", **locals())
reindex_src = helper.create_variable_for_type_inference(dtype=x.dtype)
reindex_dst = helper.create_variable_for_type_inference(dtype=x.dtype)
out_nodes = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="graph_reindex",
inputs={
"X":
x,
"Neighbors":
neighbors,
"Count":
count,
"HashTable_Value":
value_buffer if flag_buffer_hashtable else None,
"HashTable_Index":
index_buffer if flag_buffer_hashtable else None,
},
outputs={
"Reindex_Src": reindex_src,
"Reindex_Dst": reindex_dst,
"Out_Nodes": out_nodes
},
attrs={"flag_buffer_hashtable": flag_buffer_hashtable})
return reindex_src, reindex_dst, out_nodes
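# Buffered-hashtable usage sketch (the buffer size is an illustrative assumption;
# per the docstring, both buffers must be int32 tensors filled with -1):
#
# buffer_size = 20000
# value_buffer = paddle.full([buffer_size], -1, dtype="int32")
# index_buffer = paddle.full([buffer_size], -1, dtype="int32")
# reindex_src, reindex_dst, out_nodes = graph_reindex(
#     x, neighbors, count, value_buffer, index_buffer, flag_buffer_hashtable=True)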
| 45.649007
| 94
| 0.596402
|
import paddle
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import _non_static_mode
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid import core
from paddle import _C_ops
def graph_reindex(x,
neighbors,
count,
value_buffer=None,
index_buffer=None,
flag_buffer_hashtable=False,
name=None):
if flag_buffer_hashtable:
if value_buffer is None or index_buffer is None:
raise ValueError(f"`value_buffer` and `index_buffer` should not"
"be None if `flag_buffer_hashtable` is True.")
if _non_static_mode():
reindex_src, reindex_dst, out_nodes = \
_C_ops.graph_reindex(x, neighbors, count, value_buffer, index_buffer,
"flag_buffer_hashtable", flag_buffer_hashtable)
return reindex_src, reindex_dst, out_nodes
check_variable_and_dtype(x, "X", ("int32", "int64"), "graph_reindex")
check_variable_and_dtype(neighbors, "Neighbors", ("int32", "int64"),
"graph_reindex")
check_variable_and_dtype(count, "Count", ("int32"), "graph_reindex")
if flag_buffer_hashtable:
check_variable_and_dtype(value_buffer, "HashTable_Value", ("int32"),
"graph_reindex")
check_variable_and_dtype(index_buffer, "HashTable_Value", ("int32"),
"graph_reindex")
helper = LayerHelper("graph_reindex", **locals())
reindex_src = helper.create_variable_for_type_inference(dtype=x.dtype)
reindex_dst = helper.create_variable_for_type_inference(dtype=x.dtype)
out_nodes = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="graph_reindex",
inputs={
"X":
x,
"Neighbors":
neighbors,
"Count":
count,
"HashTable_Value":
value_buffer if flag_buffer_hashtable else None,
"HashTable_Index":
index_buffer if flag_buffer_hashtable else None,
},
outputs={
"Reindex_Src": reindex_src,
"Reindex_Dst": reindex_dst,
"Out_Nodes": out_nodes
},
attrs={"flag_buffer_hashtable": flag_buffer_hashtable})
return reindex_src, reindex_dst, out_nodes
| true
| true
|
1c49d6baa6cc4f1e16d6a5132cb65e2120088470
| 1,158
|
py
|
Python
|
Advent of Code/2020/3/3.py
|
dimitrov-dimitar/competitive-programming
|
f2b022377baf6d4beff213fc513907b774c12352
|
[
"MIT"
] | null | null | null |
Advent of Code/2020/3/3.py
|
dimitrov-dimitar/competitive-programming
|
f2b022377baf6d4beff213fc513907b774c12352
|
[
"MIT"
] | null | null | null |
Advent of Code/2020/3/3.py
|
dimitrov-dimitar/competitive-programming
|
f2b022377baf6d4beff213fc513907b774c12352
|
[
"MIT"
] | null | null | null |
matrix = []
with open('input') as f:
for row in f:
row_matrix = [x for x in row if x != '\n']
row_matrix *= 1000
matrix.append(row_matrix)
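# Tiling each row 1000x keeps column indices in range as j grows down the slopes;
# the puzzle's pattern repeats infinitely to the right.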
# print(matrix[0])
i, j = 0, 0
counter_1 = counter_2 = counter_3 = counter_4 = counter_5 = 0
# Part One
# 3, 1
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_1 += 1
i += 1
j += 3
print(counter_1)
# Part Two
# 1, 1
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_2 += 1
i += 1
j += 1
print(counter_2)
# 5, 1
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_3 += 1
i += 1
j += 5
print(counter_3)
# 7, 1
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_4 += 1
i += 1
j += 7
print(counter_4)
# 1, 2
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_5 += 1
i += 2
j += 1
print(counter_5)
print(counter_1 * counter_2 * counter_3 * counter_4 * counter_5)
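# Equivalent generic helper (sketch): each loop above instantiates it with one of
# the slopes (3, 1), (1, 1), (5, 1), (7, 1), (1, 2).
# def count_trees(grid, right, down):
#     return sum(1 for k, row in enumerate(grid[::down]) if row[k * right] == '#')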
| 13.623529
| 64
| 0.486183
|
matrix = []
with open('input') as f:
for row in f:
row_matrix = [x for x in row if x != '\n']
row_matrix *= 1000
matrix.append(row_matrix)
i, j = 0, 0
counter_1 = counter_2 = counter_3 = counter_4 = counter_5 = 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_1 += 1
i += 1
j += 3
print(counter_1)
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_2 += 1
i += 1
j += 1
print(counter_2)
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_3 += 1
i += 1
j += 5
print(counter_3)
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_4 += 1
i += 1
j += 7
print(counter_4)
i, j = 0, 0
while True:
if i >= len(matrix):
break
if matrix[i][j] == '#':
counter_5 += 1
i += 2
j += 1
print(counter_5)
print(counter_1 * counter_2 * counter_3 * counter_4 * counter_5)
| true
| true
|
1c49d6e03e4aa9f496a950a08e7afb8664ce56e7
| 4,752
|
py
|
Python
|
Simple Text classifiers/Text Classification on 20Newsgroup using NN/20ng_classifier - Conv1d.py
|
tejasurya/Text_Classification_using_Neural_Networks
|
d4852780e6c86843aee768d306d19428c8cb9c7f
|
[
"MIT"
] | 1
|
2020-04-30T16:15:42.000Z
|
2020-04-30T16:15:42.000Z
|
Simple Text classifiers/Text Classification on 20Newsgroup using NN/20ng_classifier - Conv1d.py
|
tejasurya/Text_Classification_using_Neural_Networks
|
d4852780e6c86843aee768d306d19428c8cb9c7f
|
[
"MIT"
] | null | null | null |
Simple Text classifiers/Text Classification on 20Newsgroup using NN/20ng_classifier - Conv1d.py
|
tejasurya/Text_Classification_using_Neural_Networks
|
d4852780e6c86843aee768d306d19428c8cb9c7f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 6 15:55:01 2018
@author: HP
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 5 14:31:43 2018
@author: HP
"""
import os
import pandas as pd
import nltk
import gensim
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from gensim.models.keyedvectors import KeyedVectors as KV
from numpy import asarray
from numpy import zeros
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten, LSTM ,Dropout,GRU, Bidirectional
from keras.layers import Embedding
from collections import defaultdict
from keras.layers import Conv1D, MaxPooling1D
import random
from sklearn.datasets import fetch_20newsgroups
batch_size=32
embedding_size=128
nclass=20
# Convolution
kernel_size = 5
filters1 = 64
filters2 =128
filters3=256
filters4=512
filters5=1024
pool_size = 4
# GRU
gru_output_size = 70
#LSTM
lstm_output_size = 70
trim_len=200
sample_cnt=500
trainer = fetch_20newsgroups(subset='train')
tester = fetch_20newsgroups(subset='test')
#input - output
train_ip=trainer.data
train_op=list(trainer.target)
test_ip=tester.data
test_op=list(tester.target)
ip=train_ip+test_ip
op=train_op+test_op
ip=ip[0:sample_cnt]
for ty in range(len(ip)):
ip[ty]=ip[ty][0:trim_len]
op=op[0:sample_cnt]
len_finder=[]
for dat in ip:
len_finder.append(len(dat))
#Splitting train and test
input_train=[]
input_test=[]
input_valid=[]
j = 0
for zz in ip:
    j = j + 1
    if j % 5 == 0:
        input_test.append(zz)
    elif j % 5 == 1:
input_valid.append(zz)
else:
input_train.append(zz)
label_train=[]
label_test=[]
label_valid=[]
j = 0
for zz in op:
    j = j + 1
    if j % 5 == 0:
        label_test.append(zz)
    elif j % 5 == 1:
label_valid.append(zz)
else:
label_train.append(zz)
#one hot encoding
i=0
y_train=np.zeros((len(label_train),max(label_train)+1))
for x in label_train:
y_train[i][x]=1
i=i+1
i=0
y_test=np.zeros((len(label_test),max(label_test)+1))
for x in label_test:
y_test[i][x]=1
i=i+1
i=0
y_valid=np.zeros((len(label_valid),max(label_valid)+1))
for x in label_valid:
y_valid[i][x]=1
i=i+1
t = Tokenizer()
t.fit_on_texts(input_train)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(input_train)
#print(encoded_docs)
# pad documents to max_length (the longest document in the corpus)
max_length = max(len_finder)
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
# load the whole embedding into memory
embeddings_index = dict()
f = open("G:\\NLP\\Dataset\\GloVe\\glove.6B.100d.txt", encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
#print('Loaded %s word vectors.' % len(embeddings_index))
# create a weight matrix for words in training docs
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
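# Words absent from GloVe keep the all-zero rows of embedding_matrix; with
# trainable=False below, those rows stay zero throughout training.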
#Validating the model
# Reuse the training tokenizer: word indices must match embedding_matrix, which
# was built from t.word_index; a fresh Tokenizer fit on this split would not line up.
vencoded_docs = t.texts_to_sequences(input_valid)
# pad documents to max_length
vpadded_docs = pad_sequences(vencoded_docs, maxlen=max_length, padding='post')
#Testing the model
# Same reasoning: encode the test split with the training tokenizer.
tencoded_docs = t.texts_to_sequences(input_test)
# pad documents to max_length
tpadded_docs = pad_sequences(tencoded_docs, maxlen=max_length, padding='post')
# define model
model = Sequential()
e = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=max_length, trainable=False)
model.add(e)
model.add(Conv1D(64,kernel_size,padding='valid',activation='relu',strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Flatten())
model.add(Dense(nclass, activation='softmax'))
# compile the model; categorical_crossentropy matches the one-hot labels and the
# softmax output (binary_crossentropy mis-scores multi-class targets)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# summarize the model
print(model.summary())
# fit the model
model.fit(padded_docs,y_train, epochs=1, verbose=0, validation_data=(vpadded_docs, y_valid))
# evaluate the model
loss, accuracy = model.evaluate(tpadded_docs, y_test, verbose=0)
print('Accuracy: %f' % (accuracy*100))
| 23.180488
| 100
| 0.742635
|
import os
import pandas as pd
import nltk
import gensim
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from gensim.models.keyedvectors import KeyedVectors as KV
from numpy import asarray
from numpy import zeros
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten, LSTM ,Dropout,GRU, Bidirectional
from keras.layers import Embedding
from collections import defaultdict
from keras.layers import Conv1D, MaxPooling1D
import random
from sklearn.datasets import fetch_20newsgroups
batch_size=32
embedding_size=128
nclass=20
kernel_size = 5
filters1 = 64
filters2 =128
filters3=256
filters4=512
filters5=1024
pool_size = 4
gru_output_size = 70
lstm_output_size = 70
trim_len=200
sample_cnt=500
trainer = fetch_20newsgroups(subset='train')
tester = fetch_20newsgroups(subset='test')
train_ip=trainer.data
train_op=list(trainer.target)
test_ip=tester.data
test_op=list(tester.target)
ip=train_ip+test_ip
op=train_op+test_op
ip=ip[0:sample_cnt]
for ty in range(len(ip)):
ip[ty]=ip[ty][0:trim_len]
op=op[0:sample_cnt]
len_finder=[]
for dat in ip:
len_finder.append(len(dat))
input_train=[]
input_test=[]
input_valid=[]
j = 0
for zz in ip:
    j = j + 1
    if j % 5 == 0:
        input_test.append(zz)
    elif j % 5 == 1:
input_valid.append(zz)
else:
input_train.append(zz)
label_train=[]
label_test=[]
label_valid=[]
j = 0
for zz in op:
    j = j + 1
    if j % 5 == 0:
        label_test.append(zz)
    elif j % 5 == 1:
label_valid.append(zz)
else:
label_train.append(zz)
i=0
y_train=np.zeros((len(label_train),max(label_train)+1))
for x in label_train:
y_train[i][x]=1
i=i+1
i=0
y_test=np.zeros((len(label_test),max(label_test)+1))
for x in label_test:
y_test[i][x]=1
i=i+1
i=0
y_valid=np.zeros((len(label_valid),max(label_valid)+1))
for x in label_valid:
y_valid[i][x]=1
i=i+1
t = Tokenizer()
t.fit_on_texts(input_train)
vocab_size = len(t.word_index) + 1
encoded_docs = t.texts_to_sequences(input_train)
max_length = max(len_finder)
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
embeddings_index = dict()
f = open("G:\\NLP\\Dataset\\GloVe\\glove.6B.100d.txt", encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
vencoded_docs = t.texts_to_sequences(input_valid)
vpadded_docs = pad_sequences(vencoded_docs, maxlen=max_length, padding='post')
tencoded_docs = t.texts_to_sequences(input_test)
tpadded_docs = pad_sequences(tencoded_docs, maxlen=max_length, padding='post')
model = Sequential()
e = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=max_length, trainable=False)
model.add(e)
model.add(Conv1D(64,kernel_size,padding='valid',activation='relu',strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Flatten())
model.add(Dense(nclass, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
print(model.summary())
model.fit(padded_docs,y_train, epochs=1, verbose=0, validation_data=(vpadded_docs, y_valid))
loss, accuracy = model.evaluate(tpadded_docs, y_test, verbose=0)
print('Accuracy: %f' % (accuracy*100))
| true
| true
|