index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
54,270 | aploium/my_utils | refs/heads/master | /requestfuzz/bare.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import six
import cgi
import copy
import json
from io import BytesIO
if six.PY3:
from http.server import BaseHTTPRequestHandler
from urllib import parse
else:
from future.backports.http.server import BaseHTTPRequestHandler
from future.backports.urllib import parse
import requests
from .utils import ensure_unicode, unicode_decode, make_netloc
from .datastructure import HTTPHeaders, Cookie, QueryDict, to_querydict
from .request import FuzzableRequest
from .url import Url
from .recursive_parse import parse_multipart, split_multipart
def _iter_chunked(stream, buffsize=32 * 1024):
err = ValueError("Error while parsing chunked transfer body.")
rn, sem, bs = b'\r\n', b';', b''
while True:
header = stream.read(1)
while header[-2:] != rn:
c = stream.read(1)
header += c
if not c or len(header) > buffsize:
raise err
size, _, _ = header.partition(sem)
try:
maxread = int(size.strip(), 16)
except ValueError:
raise err
if maxread == 0:
break
buff = bs
while maxread > 0:
if not buff:
buff = stream.read(min(maxread, buffsize))
part, buff = buff[:maxread], buff[maxread:]
if not part:
raise err
yield part
maxread -= len(part)
if stream.read(2) != rn:
raise err
# Since the bare fields have been merged into FuzzableRequest,
# BareRequest is no longer needed; it is kept here as an alias
# for backward compatibility.
# ps: to build a FuzzableRequest from raw bytes use FuzzableRequest.from_bare()
BareRequest = FuzzableRequest
class BareLoader(BaseHTTPRequestHandler):
    """Parse a raw HTTP request (bytes) into structured, queryable fields.

    Reuses BaseHTTPRequestHandler's parser by feeding it a BytesIO
    instead of a real socket.
    """

    def __init__(self, request_bin, scheme="http", real_host=None, port=None):
        # request_bin: the complete raw request bytes (request line, headers, body)
        self.rfile = BytesIO(request_bin)
        self.raw_requestline = self.rfile.readline()
        self.error_code = self.error_message = None
        self.parse_request()  # this consumes all the header lines
        self.body = self._load_body()  # everything after the headers is the body
        # convert headers into the project's HTTPHeaders structure
        self.headers = HTTPHeaders(self.headers.items())
        self._path = ensure_unicode(self.path)
        self.command = ensure_unicode(self.command)
        # detect which line separator the raw request used
        if self.raw_requestline.endswith(b"\r\n"):
            self.line_sep = b'\r\n'
        elif self.raw_requestline.endswith(b'\r'):
            self.line_sep = b'\r'
        else:
            self.line_sep = b'\n'
        sp = parse.urlsplit(self._path)
        self.path = sp.path
        self.query_string = sp.query
        self.scheme = scheme
        # handle a port embedded in the Host header (e.g. "foo.com:8080")
        _host = self.headers.get("Host", "").split(":")
        if len(_host) == 2 and port is None:
            port = int(_host[1])
        self.real_host = real_host or self.host
        if port:
            self.port = port
        elif scheme == "https":
            self.port = 443
        else:
            self.port = 80

    def send_error(self, code, message=None, explain=None):
        """No-op; only present because BaseHTTPRequestHandler calls it internally."""
        self.error_code = code
        self.error_message = message

    def _load_body(self):
        if not self.chunked:
            # not chunked: just read everything that is left
            return self.rfile.read()
        else:
            # chunked: read piece by piece and join them back together
            return b"".join(_iter_chunked(self.rfile))

    @property
    def query(self):
        return to_querydict(self.query_string)

    GET = query  # alias

    @property
    def protocol(self):
        return self.request_version

    @property
    def method(self):
        return self.command

    @property
    def content_type(self):
        return self.headers.get("Content-Type", "")

    @property
    def content_length(self):
        # NOTE: returns -1 when the Content-Length header is absent
        return int(self.headers.get("Content-Length", -1))

    @property
    def text(self):
        encoding, text = unicode_decode(self.body)
        return text

    @property
    def is_json(self):
        return "/json" in self.content_type

    @property
    def is_form(self):
        return "www-form-urlencoded" in self.content_type

    @property
    def json(self):
        return json.loads(self.text)

    @property
    def forms(self):
        if self.is_form:
            return to_querydict(self.text)
        else:
            return split_multipart(self.POST)[0]

    @property
    def xml(self):  # TODO: implement xml parsing
        raise NotImplementedError()

    @property
    def POST(self):
        if self.is_form:
            return self.forms
        if self.is_json:
            return self.json
        # decode else, eg: multipart
        return parse_multipart(self.body, self.content_type)

    @property
    def files(self):
        """
        Returns:
            dict[str, cgi.FieldStorage] | OrderedMultiDict: the uploaded files
        """
        return split_multipart(self.POST)[1]

    @property
    def cookies(self):
        _cookie_str = self.headers.get('Cookie', '')
        if six.PY2:
            _cookie_str = _cookie_str.decode("UTF-8")
        cookies = Cookie(_cookie_str)
        return cookies

    @property
    def raw(self):
        # the original, unmodified request bytes
        return self.rfile.getvalue()

    @property
    def chunked(self):
        return 'chunked' in self.headers.get("Transfer-Encoding", "")

    @property
    def is_xhr(self):
        return "XMLHttpRequest" == self.headers.get("X-Requested-With")

    @property
    def user_agent(self):
        return self.headers.get("User-Agent", "")

    @property
    def host(self):
        # hostname from the Host header, with any ":port" suffix stripped
        _host = self.headers.get("Host", "")
        if ":" in _host:
            _host = _host[:_host.find(":")]
        return _host

    @property
    def netloc(self):
        """
        Return the host and port; the default port is omitted automatically.
        Equivalent to the `.netloc` of `urlsplit`.
        Examples:
            "foo.com:8888"
            "bar.com"
        """
        return make_netloc(self.real_host, scheme=self.scheme, port=self.port)

    @property
    def url(self):
        return Url(parse.urlunsplit((self.scheme, self.netloc, self.path, self.query_string, "")))

    @property
    def url_no_query(self):
        """
        Return the url without its query string.
        Examples:
            "http://foo.com:88/cat.html"
            "https://foo.com/dog.php"
        """
        return parse.urlunsplit((self.scheme, self.netloc, self.path, "", ""))

    @property
    def url_no_path(self):
        """
        Return the url without its path.
        Examples:
            "http://foo.com:88"
            "https://bar.com"
        """
        return parse.urlunsplit((self.scheme, self.netloc, "", "", ""))

    def to_requests(self):
        """
        Convert into a kwargs dict for `requests.request`.
        Examples:
            import requests
            req_dict = some_obj.to_requests()
            requests.request(**req_dict)
        Returns:
            Dict[str, str]:
        """
        _headers = copy.deepcopy(self.headers)
        if self.cookies:
            # SimpleCookie is unordered before py3.6, so the Cookie header
            # has to be written manually to preserve ordering
            _headers["Cookie"] = str(self.cookies)
        elif "Cookie" in _headers:
            del _headers["Cookie"]
        if "Content-Length" in _headers:
            del _headers["Content-Length"]
        return {
            "method": self.method,
            "url": self.url.without_query,
            "params": self.query,
            "data": self.body,
            "headers": _headers,
            # "cookies": self.cookies,
        }

    def send(self, session=None, verify=False, **kwargs):
        # fire the request with `requests`; kwargs override the generated dict
        session = session or requests.Session()
        req = self.to_requests()
        req["verify"] = verify
        req.update(kwargs)
        return session.request(**req)

    def to_fuzzable(self, klass=FuzzableRequest, **kwargs):
        """
        Returns:
            FuzzableRequest
        """
        kw = {}
        if self.is_json:
            kw["json"] = self.json
        elif self.is_form:
            kw["data"] = self.forms
        else:  # includes files (multipart)
            kw["data"] = self.forms
            kw["files"] = self.files
        kw.update(kwargs)
        bare_request = klass(
            self.url, method=self.method, protocol=self.protocol,
            headers=self.headers, cookies=self.cookies,
            bare=self.raw,
            **kw)
        bare_request.host = self.host
        return bare_request

    @classmethod
    def from_fuzzable(cls, fuzzable):
        """
        Args:
            fuzzable (FuzzableRequest):
        """
        kw = {}
        if hasattr(fuzzable, "bare"):
            kw["old_bin"] = fuzzable.bare
        return cls.build(
            method=fuzzable.method, protocol=fuzzable.protocol,
            path=fuzzable.path, query=fuzzable.query,
            headers=fuzzable.headers, cookies=fuzzable.cookies,
            host=fuzzable.host, port=fuzzable.port,
            data=fuzzable.data, json=fuzzable.json, files=fuzzable.files,
            scheme=fuzzable.scheme,
            **kw
        )

    @classmethod
    def build(cls, old=None, old_bin=None,  # TODO: split this function up
              method=None, protocol=None,
              path=None, query=None,
              data=None, json=None, files=None,
              headers=None, cookies=None,
              host=None,
              real_host=None, port=None, scheme=None,
              line_sep=None,
              ):
        """
        Assemble a new BareLoader.
        See Also:
            `test_build_modified`
        Args:
            old (BareLoader): an existing object to inherit unspecified fields from
        Returns:
            BareLoader
        """
        _modify_cookies = bool(cookies)
        _modify_url_no_query = any((scheme, host, real_host, port, path))
        url_no_query = None
        if old and old_bin:
            raise ValueError("old and old_bin should not be specified both")
        if old_bin is not None:
            old = cls(old_bin)
        if old is not None:
            # inherit every unspecified field from the old request
            path = path or old.path
            query = query or old.query_string
            data = data if any((data, json, files)) else old.body
            headers = headers or old.headers
            cookies = cookies if cookies is not None else old.cookies
            real_host = real_host or old.real_host
            port = port or old.port
            scheme = scheme or old.scheme
            line_sep = line_sep or old.line_sep
            method = method or old.method
            host = host or old.host
            protocol = protocol or old.request_version
            url_no_query = old.url_no_query
        # apply defaults
        scheme = ensure_unicode(scheme if scheme is not None else "http")
        path = ensure_unicode(path if path is not None else "/")
        real_host = ensure_unicode(real_host or host)
        if host:
            netloc = make_netloc(host, scheme=scheme, port=port)
        else:
            netloc = None
        real_netloc = make_netloc(real_host, scheme=scheme, port=port)
        headers_copy = copy.deepcopy(headers)
        line_sep = line_sep if line_sep is not None else b'\r\n'
        if _modify_url_no_query or not url_no_query:
            # something url-related was modified, so the url must be rebuilt
            url_no_query = parse.urlunsplit((scheme, real_netloc, path, "", ""))
        # handle cookies
        if _modify_cookies:
            # cookies were given explicitly, so drop the Cookie header;
            # otherwise requests would let the header override the passed cookies
            if headers_copy and "Cookie" in headers_copy:
                del headers_copy["Cookie"]
        if headers_copy:
            # remove headers that would interfere with PreparedRequest
            for _hname in ('Content-Length', 'Content-Type'):
                if _hname in headers_copy:
                    del headers_copy[_hname]
        # ----- use requests' machinery to build the various parameters -------
        fake_req = requests.PreparedRequest()
        fake_req.prepare(
            method=method, url=url_no_query,
            headers=headers_copy, params=query,
            cookies=cookies,
            data=data, files=files, json=json
        )
        req = b''
        # ----- build the request line --------
        request_line = ""
        # method
        request_line += fake_req.method
        request_line += ' '
        # path_url
        request_line += fake_req.path_url
        request_line += " "  # the separating space
        # protocol
        if protocol is None:
            request_line += "HTTP/1.1"
        else:
            request_line += protocol
        # write the first line
        req += request_line.encode("UTF-8") + line_sep
        # -------- request line done ----------
        # -------- build headers ---------
        headers_copy = copy.deepcopy(headers)
        if _modify_cookies:
            # a new cookie was given, so rebuild the Cookie header
            _cookie_obj = Cookie(cookies)
            headers_copy["Cookie"] = str(_cookie_obj)
        if fake_req.headers.get("Content-Length"):
            headers_copy["Content-Length"] = fake_req.headers["Content-Length"]
        if fake_req.headers.get("Transfer-Encoding"):
            headers_copy["Transfer-Encoding"] = fake_req.headers["Transfer-Encoding"]
        # PreparedRequest may have changed the Content-Type
        _new_content_type = fake_req.headers.get("Content-Type", "")
        if _new_content_type and _new_content_type not in headers_copy.get("Content-Type", ""):
            headers_copy["Content-Type"] = _new_content_type
        # write Host (actually the netloc)
        if netloc and netloc != headers_copy.get("Host"):
            headers_copy["Host"] = netloc
        # write the headers out
        for name, value in headers_copy.items():
            _line = "{}: {}".format(name, value)
            req += _line.encode("UTF-8") + line_sep
        # end of headers
        req += line_sep
        # -------- build the body -----------
        _body = fake_req.body  # read the body out of the PreparedRequest
        if _body:
            if isinstance(_body, six.text_type):
                _body = _body.encode("UTF-8")  # TODO: detect the encoding from the headers
            req += _body
        # -------- build the new BareLoader --------
        new_bare_req = cls(req, real_host=real_host,
                           scheme=scheme, port=port, )
        # return
        return new_bare_req
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,271 | aploium/my_utils | refs/heads/master | /executor_buffmap.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals, division
import sys
import time
import functools
import itertools
import logging
from concurrent.futures import TimeoutError

logger = logging.getLogger(__name__)

if sys.version_info[0] == 2:
    izip = itertools.izip
else:
    izip = zip
def _find_oldest_task(taskset):
return min(x[1] for x in taskset)
def base_buffmap(executor, fn, *iterables, **kwargs):
    """Enhanced version of ``concurrent.futures.Executor.map()``.

    - discards the arguments of finished tasks, saving significant memory
      on long queues (compared to the stock implementation)
    - semi-out-of-order execution
    - expands `iterables` lazily while running
    - supports `common_kwargs`, keyword arguments passed to every call
    - avoids hanging forever when a worker process/thread dies and leaves
      a zombie (requires `timeout` to be set)
    - shuts the executor down automatically

    Keyword Args:
        common_kwargs (dict): extra kwargs applied to every ``fn`` call
        buffsize (int): max number of in-flight tasks (default: workers*2+5)
        check_interval (float): upper bound of the polling sleep, seconds
        timeout (float): per-task timeout; ``None`` disables the check
        chunksize: accepted and ignored, for Executor.map() compatibility

    Raises:
        TimeoutError: when the oldest unfinished task exceeds `timeout`
        ValueError: on unknown keyword arguments
    """
    common_kwargs = kwargs.pop("common_kwargs", {})
    buffsize = kwargs.pop("buffsize", executor._max_workers * 2 + 5)
    check_interval = kwargs.pop("check_interval", 2)
    timeout = kwargs.pop("timeout", None)
    if "chunksize" in kwargs:
        del kwargs["chunksize"]
    if kwargs:
        raise ValueError("unknown kwargs: {}".format(kwargs))
    taskset = set()
    _iter = izip(*iterables)
    _oldest_time = time.time()

    def _fill_taskset():
        # top the in-flight set back up to `buffsize` tasks
        while len(taskset) < buffsize:
            try:
                args = next(_iter)
            except StopIteration:
                return
            taskset.add((
                executor.submit(fn, *args, **common_kwargs),
                time.time() if timeout is not None else None,
            ))

    _fill_taskset()
    _sleep_interval = 0
    _done_tasks = []
    try:
        while taskset:
            for task in taskset:
                # scan for finished tasks
                if task[0].done():
                    # record first, remove afterwards (can't mutate while iterating)
                    _done_tasks.append(task)
            for task in _done_tasks:
                taskset.remove(task)
            if timeout is not None and taskset:
                # BUGFIX: the original compared `_oldest_time - time.time()`,
                # which is always negative, so the timeout could never fire.
                if time.time() - _oldest_time > timeout:
                    # only do the (linear) oldest-task scan when the cached
                    # oldest timestamp itself has already expired; if the
                    # cached oldest is fresh, every newer task is fresh too
                    _oldest_time = _find_oldest_task(taskset)
                    if time.time() - _oldest_time > timeout:
                        raise TimeoutError("timeout in running executor")
            _fill_taskset()  # refill before yielding, to waste less time
            for task in _done_tasks:
                yield task[0].result()
            if not _done_tasks:  # nothing finished: keep waiting...
                time.sleep(_sleep_interval)
                if _sleep_interval < check_interval:
                    # back off gradually up to check_interval
                    _sleep_interval += check_interval / 10.0
            else:
                _sleep_interval = 0
            _done_tasks = []
    finally:
        # best-effort cancellation plus automatic shutdown
        for task in taskset:
            task[0].cancel()
        for task in _done_tasks:
            task[0].cancel()
        executor.shutdown(wait=False)


thread_buffmap = base_buffmap  # alias: the base version is enough for threads
def _get_chunks(chunksize, *iterables):
    """copy from python 3.6.1 `concurrent.futures.process._get_chunks`

    Zip *iterables* and yield tuples of at most `chunksize` argument tuples.
    """
    # BUGFIX: `itertools` is only imported at module level on py2, so a plain
    # `itertools.islice` raised NameError on py3; import locally instead.
    from itertools import islice
    it = izip(*iterables)
    while True:
        chunk = tuple(islice(it, chunksize))
        if not chunk:
            return
        yield chunk
def _process_chunk(fn, chunk):
"""copy from python 3.6.1 `concurrent.futures.process._process_chunk` """
return [fn(*args) for args in chunk]
def process_buffmap(executor, fn, *iterables, **kwargs):
    """Enhanced ``concurrent.futures.process.ProcessPoolExecutor.map()``.

    - supports `chunksize` on py2
    - kills the worker subprocesses automatically when an error occurs

    Returns:
        an iterator over the individual results (chunks are flattened)
    """
    chunksize = kwargs.pop("chunksize", 1)
    if chunksize < 1:
        raise ValueError("chunksize must be >= 1.")
    results = base_buffmap(
        executor,
        functools.partial(_process_chunk, fn),
        _get_chunks(chunksize, *iterables),
        **kwargs
    )
    _processes = executor._processes

    def _kill_subprocesses():
        # best-effort terminate of every worker subprocess
        # (py3 keeps `_processes` as a {pid: Process} dict; py2 as a set)
        procs = _processes.values() if isinstance(_processes, dict) else _processes
        for p in procs:
            try:
                p.terminate()
            except Exception:
                logger.warning("unable to shutdown subprocess {}".format(p), exc_info=True)

    def _iter_results():
        # BUGFIX: the original wrapped the *lazy* `itertools.chain.from_iterable`
        # call in try/except; since building the chain executes nothing, errors
        # raised during iteration never triggered the subprocess cleanup.
        # Iterating inside a generator actually catches them.
        try:
            for chunk in results:
                for item in chunk:
                    yield item
        except BaseException:
            _kill_subprocesses()
            raise

    return _iter_results()
def process_executor_shutdown(executor, wait=True):
    """Shut down a ProcessPoolExecutor, force-terminating its workers first.

    Args:
        executor: a concurrent.futures ProcessPoolExecutor (or compatible)
        wait (bool): passed through to ``executor.shutdown()``
    """
    # BUGFIX: removed the stray `multiprocessing.Process.is_alive()` call,
    # which invoked an unbound method without an instance and raised TypeError.
    processes = executor._processes or ()
    if isinstance(processes, dict):
        # py3 ProcessPoolExecutor keeps {pid: Process}; iterate the processes,
        # not the pids (the original iterated ints and terminate() failed)
        processes = list(processes.values())
    for p in processes:
        try:
            p.terminate()
        except Exception:
            logger.warning("unable to shutdown subprocess {}".format(p), exc_info=True)
    executor.shutdown(wait=wait)
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,272 | aploium/my_utils | refs/heads/master | /raw2requests.py | #!/usr/bin/env python3
# coding=utf-8
"""Construct a requests.Response from raw http response bytes(including headers)"""
import io
__author__ = "Aploium <i@z.codes>"
class FakeSocket:
    """Minimal socket stand-in that hands an ``io.BytesIO`` to ``makefile()``.

    Lets ``http.client.HTTPResponse`` parse a pre-recorded byte buffer as if
    it were coming from a live connection.
    """

    def __init__(self, data=None):
        # the whole "network" content lives in this in-memory buffer
        self.bytes_io = io.BytesIO(data)

    def close(self):
        """No-op; only present to satisfy the socket interface."""
        pass

    def makefile(self, *args, **kwargs):
        """Return the shared buffer; mode/buffering arguments are ignored."""
        return self.bytes_io
def bytes2response(data, level=3,
                   method=None, url="http://example.com", req_headers=None, req_files=None,
                   req_data=None, req_auth=None, req_json=None
                   ):
    """
    Construct a requests.Response from raw http response bytes(including headers)
    Warning: although we could decode raw bytes to response object,
        this is not the right way these library were designed to,
        this decode may cause unexpected bugs.
    :param data: raw http response bytes data, including headers
    :type data: bytes
    :param level:
        level=0: decode as http.client.HTTPResponse,
                 returns a (response, body_bytes) tuple
        level=1: decode as requests.packages.urllib3.response.HTTPResponse
        level>=2: decode to requests.Response (the default; level is 3)
    :rtype: requests.Response
    """
    # stdlib import kept function-local to avoid compatibility issues
    import http.client

    fake_socket = FakeSocket(data)
    resp_builtin = http.client.HTTPResponse(fake_socket, method=method, url=url)  # type: http.client.HTTPResponse
    resp_builtin.begin()
    if level == 0:
        # pure-stdlib path; note this returns a tuple, unlike the other levels
        return resp_builtin, resp_builtin.read()  # type: http.client.HTTPResponse,bytes
    # `requests` imports deferred to here so that level=0 also works
    # when requests is not installed
    import requests.packages
    import requests.adapters
    # resolve to the requests builtin urllib3 HTTPResponse
    resp_requests_basic = requests.packages.urllib3.response.HTTPResponse.from_httplib(resp_builtin)
    if level == 1:
        return resp_requests_basic  # type: requests.packages.urllib3.response.HTTPResponse
    # fake Request
    req = requests.Request(
        method=method, url=url, headers=req_headers, files=req_files,
        data=req_data, auth=req_auth, json=req_json
    )
    req = req.prepare()
    # fake adapter, which is necessarily for response construct
    adapter = requests.adapters.HTTPAdapter()
    # resolve to the wellknown/often-see requests.Response
    wellknown_resp = adapter.build_response(req, resp_requests_basic)
    wellknown_resp._content = resp_requests_basic.data
    return wellknown_resp  # type: requests.Response
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,273 | aploium/my_utils | refs/heads/master | /requests_connection_pool.py | # coding=utf-8
"""
本模块提供了一个线程安全的 keep-alive 链接池
requests的连接在每个session中是自动 keep-alive 的,
在 `connection_keep_alive` 关闭时, 每次请求都会创建一个新的session,
并发起一次新的请求, 则会带来相当大的连接开销(时间上)
通过保持并复用 requests 的session, 可以极大地减少requests在请求远程服务器时的连接延迟
以前的版本是线程不安全, 当并发数大时会出现 ConnectionResetError
"""
import time
import requests
import threading
from decorator import contextmanager
import six.moves.urllib.parse as urllib_parse
SESSION_TTL = 30  # during cleanup, sessions inactive for this many seconds are dropped

_gc_checkpoint = time.time()

# Session pool, keyed by domain.  Each domain holds a stack of session dicts;
# sessions are taken in LIFO order (the most recently used first), which
# maximises the chance that the keep-alive connection is still open.
# Entry layout example:
#     pool = {
#         "example.com": [
#             {
#                 "domain": "example.com",
#                 "sessobj": requests.Session(),
#                 "active": time.time(),
#             },
#         ],
#     }
# BUGFIX: the pool now starts empty.  The original shipped a live example
# entry whose key was "session" while the code reads "sessobj", so any
# request to example.com raised KeyError; it also created a requests.Session
# as an import-time side effect.
pool = {}

cleaning_lock = threading.Lock()
locked_session = threading.local()  # this is a thread-local variable
def get_session(domain_or_url):
    """
    Get a keep-alive session for this domain.
    :param domain_or_url: a domain, or a full url to take the domain from
    :type domain_or_url: str
    :rtype: requests.Session
    """
    global _gc_checkpoint
    domain = urllib_parse.urlsplit(domain_or_url).netloc or domain_or_url
    if domain not in pool:
        pool[domain] = []
    if not hasattr(locked_session, "sessdicts"):
        # This list stores the sessions currently checked out by this thread.
        # When a session is taken out for use it is removed from `pool` and
        # appended here; `release_lock()` must be called afterwards to put the
        # locked sessions back into the pool.
        locked_session.sessdicts = []
    if not pool[domain]:
        # pool is empty for this domain: create a new session
        sessdict = {
            "domain": domain,
            "sessobj": requests.Session(),
        }
    else:
        # take the most recently used one from the pool (LIFO)
        sessdict = pool[domain].pop()
    sessdict["active"] = time.time()
    locked_session.sessdicts.append(sessdict)
    if _gc_checkpoint < time.time() - SESSION_TTL:
        with cleaning_lock:
            clear()
            # BUGFIX: record when the cleanup ran; the original never updated
            # _gc_checkpoint, so clear() executed on every call once the first
            # TTL interval had elapsed.
            _gc_checkpoint = time.time()
    return sessdict["sessobj"]  # type: requests.Session
@contextmanager
def session(domain_or_url):
    """Context manager: check out a keep-alive session and always return it.

    BUGFIX: the original had no try/finally, so an exception raised inside
    the ``with`` body leaked the session (it never went back to the pool).
    """
    sess = get_session(domain_or_url)
    try:
        yield sess
    finally:
        release_lock(sess)
def release_lock(session=None):
    """Return checked-out session(s) of this thread to the pool.

    Args:
        session: a specific session to release; ``None`` releases them all.
    Raises:
        ValueError: when `session` was not checked out by this thread.
    """
    if not hasattr(locked_session, "sessdicts"):
        if session is not None:
            raise ValueError("You DONT have this session!")
        return
    if session is not None:
        for _sessdict in locked_session.sessdicts:
            if _sessdict["sessobj"] == session:
                sessdict = _sessdict
                break
        else:
            raise ValueError("You DONT have this session: {}".format(session))
        locked_session.sessdicts.remove(sessdict)
        pool[sessdict["domain"]].append(sessdict)
        # BUGFIX: return here.  The original fell through to the loop below
        # and released every other locked session as well.
        return
    for sessdict in locked_session.sessdicts:  # type: dict
        pool[sessdict["domain"]].append(sessdict)
    # BUGFIX: empty the thread-local list; the original kept the entries,
    # so a later call appended the same sessions to the pool a second time.
    del locked_session.sessdicts[:]
def clear(force_flush=False):
    """Drop expired sessions; with ``force_flush=True``, empty the whole pool."""
    if force_flush:
        pool.clear()
        return
    deadline = time.time() - SESSION_TTL
    for domain in list(pool.keys()):
        pool[domain] = [entry for entry in pool[domain] if entry["active"] > deadline]
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,274 | aploium/my_utils | refs/heads/master | /requestfuzz/tests/test_mutants.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import collections
import json
from future.backports.urllib import parse
from requestfuzz import FuzzableRequest
from requestfuzz.payload import Payload
from requestfuzz.mutant import MutantBase, ShallowMutant, PayloadFactoryBase, DeepMutant, HeadersMutant
from requestfuzz.tests.test_fuzzable import make_complex_req
class DummyPayloadFactory(PayloadFactoryBase):
    """Trivial factory for tests: emits a single "<key>__<value>" payload."""

    def make(self, key=None, value=None, place=None, node=None):
        payload = Payload("{}__{}".format(key, value))
        yield payload, {}
def test_payload_factory_dummy():
    """The dummy factory should emit exactly one (Payload, meta) pair."""
    request = make_complex_req()
    factory = DummyPayloadFactory(request)
    produced = list(factory.make(key="cat", value="dog"))
    assert produced == [(Payload("cat__dog"), {})]
def test_shallow_mutant():
    """Each generated request should contain exactly one mutated parameter."""
    fz = make_complex_req()
    mutant = ShallowMutant(DummyPayloadFactory)
    expected = [
        # query
        b"/anything?a=a__1&b=2&c=3",
        b"/anything?a=1&b=b__2&c=3&a=x&a=y",
        b"/anything?a=1&b=2&c=c__3&a=x&a=y",
        b"/anything?a=a__x&b=2&c=3",
        b"/anything?a=a__y&b=2&c=3",
        # data
        b"a=a__b&c=d&c=e&e=x&x=f",
        b"a=b&c=c__d&e=x&x=f",
        b"a=b&c=c__e&e=x&x=f",
        b"a=b&c=d&c=e&e=e__x&x=f",
        b"a=b&c=d&c=e&e=x&x=x__f",
    ]
    for mutated, fragment in zip(mutant.make(fz), expected):
        assert fragment in mutated.to_bare()
def test_deep_mutant_simple():
    """With non-recursive values the behaviour should be *almost* identical
    to the shallow mutant, except for one advantage: duplicate keys are
    not lost.
    """
    fz = make_complex_req()
    mutant = DeepMutant(DummyPayloadFactory)
    expected = [
        # query
        b"/anything?a=a__1&b=2&c=3&a=x&a=y",
        b"/anything?a=1&b=b__2&c=3&a=x&a=y",
        b"/anything?a=1&b=2&c=c__3&a=x&a=y",
        b"/anything?a=1&b=2&c=3&a=a__x&a=y",
        b"/anything?a=1&b=2&c=3&a=x&a=a__y",
        # data
        b"a=a__b&c=d&c=e&e=x&x=f",
        b"a=b&c=c__d&c=e&e=x&x=f",
        b"a=b&c=d&c=c__e&e=x&x=f",
        b"a=b&c=d&c=e&e=e__x&x=f",
        b"a=b&c=d&c=e&e=x&x=x__f",
    ]
    for mutated, fragment in zip(mutant.make(fz), expected):
        assert fragment in mutated.to_bare()
def test_deep_mutant_simple2():
    """Same as above, except the fz body is JSON."""
    fz = make_complex_req()
    fz.bin_body = json.dumps(collections.OrderedDict([
        ("x", "1"),
        ("b", "2"),
        ("z", "3"),
        ("kerbin", "kerbal"),
    ])).encode("UTF-8")
    mutant = DeepMutant(DummyPayloadFactory)
    expected = [
        # query
        b"/anything?a=a__1&b=2&c=3&a=x&a=y",
        b"/anything?a=1&b=b__2&c=3&a=x&a=y",
        b"/anything?a=1&b=2&c=c__3&a=x&a=y",
        b"/anything?a=1&b=2&c=3&a=a__x&a=y",
        b"/anything?a=1&b=2&c=3&a=x&a=a__y",
        # data
        b"""{"x": "x__1", "b": "2", "z": "3", "kerbin": "kerbal"}""",
        b"""{"x": "1", "b": "b__2", "z": "3", "kerbin": "kerbal"}""",
        b"""{"x": "1", "b": "2", "z": "z__3", "kerbin": "kerbal"}""",
        b"""{"x": "1", "b": "2", "z": "3", "kerbin": "kerbin__kerbal"}""",
    ]
    for correct, atk_fz in zip(expected, mutant.make(fz)):
        assert correct in atk_fz.to_bare()
def test_deep_mutant_complex():
    """Test complex items whose values themselves contain parsable structures
    (JSON inside a query value, a urlencoded form inside a data value) --
    DeepMutant must recurse into them and mutate the inner key/value pairs.
    """
    fz = make_complex_req()
    # query value "c" is a JSON object, one of whose values is itself a form
    fz.query["c"] = json.dumps(dict(
        j="son",
        f="yet=another&form=1",
    ))
    # data value "e" is a urlencoded form, one of whose values is JSON
    fz.data["e"] = parse.urlencode([
        ("cat", "dog"),
        ("j2", '{"foo":"bar"}'),
    ])
    mutant = DeepMutant(DummyPayloadFactory)
    expected = [
        # query: first the top-level values, then the recursive inner ones
        b"/anything?a=a__1&b=2&c=%7B%22j%22%3A+%22son%22%2C+%22f%22%3A+%22yet%3Danother%26form%3D1%22%7D&a=x&a=y",
        b"/anything?a=1&b=b__2&c=%7B%22j%22%3A+%22son%22%2C+%22f%22%3A+%22yet%3Danother%26form%3D1%22%7D&a=x&a=y",
        b"/anything?a=1&b=2&c=c__%7B%22j%22%3A+%22son%22%2C+%22f%22%3A+%22yet%3Danother%26form%3D1%22%7D&a=x&a=y",
        b"/anything?a=1&b=2&c=%7B%22j%22%3A+%22son%22%2C+%22f%22%3A+%22yet%3Danother%26form%3D1%22%7D&a=a__x&a=y",
        b"/anything?a=1&b=2&c=%7B%22j%22%3A+%22son%22%2C+%22f%22%3A+%22yet%3Danother%26form%3D1%22%7D&a=x&a=a__y",
        b"/anything?a=1&b=2&c=%7B%22j%22%3A+%22j__son%22%2C+%22f%22%3A+%22yet%3Danother%26form%3D1%22%7D&a=x&a=y",
        b"/anything?a=1&b=2&c=%7B%22j%22%3A+%22son%22%2C+%22f%22%3A+%22f__yet%3Danother%26form%3D1%22%7D&a=x&a=y",
        b"/anything?a=1&b=2&c=%7B%22j%22%3A+%22son%22%2C+%22f%22%3A+%22yet%3Dyet__another%26form%3D1%22%7D&a=x&a=y",
        b"/anything?a=1&b=2&c=%7B%22j%22%3A+%22son%22%2C+%22f%22%3A+%22yet%3Danother%26form%3Dform__1%22%7D&a=x&a=y",
        # data: same pattern (note the double percent-encoding of the nested JSON)
        b"a=a__b&c=d&c=e&e=cat%3Ddog%26j2%3D%257B%2522foo%2522%253A%2522bar%2522%257D&x=f",
        b"a=b&c=c__d&c=e&e=cat%3Ddog%26j2%3D%257B%2522foo%2522%253A%2522bar%2522%257D&x=f",
        b"a=b&c=d&c=c__e&e=cat%3Ddog%26j2%3D%257B%2522foo%2522%253A%2522bar%2522%257D&x=f",
        b"a=b&c=d&c=e&e=e__cat%3Ddog%26j2%3D%257B%2522foo%2522%253A%2522bar%2522%257D&x=f",
        b"a=b&c=d&c=e&e=cat%3Ddog%26j2%3D%257B%2522foo%2522%253A%2522bar%2522%257D&x=x__f",
        b"a=b&c=d&c=e&e=cat%3Dcat__dog%26j2%3D%257B%2522foo%2522%253A%2B%2522bar%2522%257D&x=f",
        b"a=b&c=d&c=e&e=cat%3Ddog%26j2%3Dj2__%257B%2522foo%2522%253A%2B%2522bar%2522%257D&x=f",
        b"a=b&c=d&c=e&e=cat%3Ddog%26j2%3D%257B%2522foo%2522%253A%2B%2522foo__bar%2522%257D&x=f",
    ]
    for atk_fz, correct in zip(mutant.make(fz), expected):
        assert correct in atk_fz.to_bare()
        # print(atk_fz.to_bare().decode())
def test_headers_mutant():
    """HeadersMutant mutates a fixed set of interesting headers, adding
    absent ones (X-Forward-For) with an empty original value.
    """
    fz = make_complex_req()
    fz.headers["User-Agent"] = "monkey"
    fz.headers["Referer"] = "http://cat.com"
    mutant = HeadersMutant(DummyPayloadFactory)
    expected = [
        b"User-Agent: User-agent__monkey",
        b"X-Forward-For: X-Forward-For__",
        b"referer: Referer__http://cat.com",
    ]
    for correct, atk_fz in zip(expected, mutant.make(fz)):
        assert correct in atk_fz.to_bare()
if __name__ == '__main__':
    # Run the whole suite when executed directly.  The original runner
    # omitted test_payload_factory_dummy and test_shallow_mutant; all tests
    # defined in this module are invoked now (pytest collects them anyway).
    test_payload_factory_dummy()
    test_shallow_mutant()
    test_deep_mutant_simple()
    test_deep_mutant_simple2()
    test_deep_mutant_complex()
    test_headers_mutant()
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,275 | aploium/my_utils | refs/heads/master | /requestfuzz/__init__.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
from future import standard_library as _standard_library
_standard_library.install_aliases()
from .datastructure import (
OrderedMultiDict, HTTPHeaders, QueryDict, Cookie)
from .request import FuzzableRequest
from .bare import BareRequest, BareLoader
from .url import Url
from .csrf import BaseCSRF, GenericCSRF
from .recursive_parse import load, BaseNode
from .mutant import MutantBase, PayloadFactoryBase, ShallowMutant, DeepMutant
from .payload import Payload
__author__ = "aploium <i@z.codes>"
__license__ = "MIT"
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,276 | aploium/my_utils | refs/heads/master | /requestfuzz/plugin.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import collections
import weakref
# Import used for type hints only; tolerated to fail (e.g. circular import
# while the package is still initializing).  Narrowed from a bare
# ``except:`` so SystemExit / KeyboardInterrupt are no longer swallowed.
try:
    from .request import FuzzableRequest
except Exception:
    pass
class FzPluginBase(object):
    """Base class for plugins that extend :class:`FuzzableRequest`.

    Plugins can influence the fz's behavior through event hooks.  Only one
    hook exists for now; more can be added when the need arises.

    Args:
        fz (FuzzableRequest): the request this plugin is attached to
    """

    def __init__(self, fz):
        """
        Args:
            fz (FuzzableRequest):
        """
        # A weak proxy avoids a reference cycle between the request and its
        # plugins; in normal use it behaves like a direct reference.
        self.fz = weakref.proxy(fz)

    def on_init_complete(self):
        """Hook invoked once the fz has finished initializing."""
class AutoHeader(FzPluginBase):
    """Fill in headers commonly sent by browsers when missing, incl. Referer."""

    DEFAULT_HEADERS = collections.OrderedDict([
        ('Accept-Encoding', "gzip, deflate"),
        ('Accept-Language', "zh-CN,zh;q=0.8,en;q=0.6,it;q=0.4,es;q=0.2"),
        ('User-Agent',
         "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"),
        ('Accept', "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"),
    ])

    def on_init_complete(self):
        """Add every missing default header, then make sure a Referer exists."""
        headers = self.fz.headers
        for name, value in self.DEFAULT_HEADERS.items():
            if name not in headers:
                headers[name] = value
        self._set_referer()

    def _set_referer(self, force=False):
        # Keep an existing Referer unless explicitly forced to overwrite it.
        if force or "Referer" not in self.fz.headers:
            self.fz.headers["Referer"] = self.fz.url.tostr()
class AutoCleanParam(FzPluginBase):
    """Strip useless query keys (tracking / cache-busting params like spm)."""

    USELESS_PARM = [
        'spm', '_spm', '__preventCache', '_t',
        'timestamp', '_timestamp', '__timestamp',
        "_",
    ]

    def on_init_complete(self):
        query = self.fz.query
        for key in self.USELESS_PARM:
            if key in query:
                del query[key]
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,277 | aploium/my_utils | refs/heads/master | /err_hunter/err_hunter/mylogging.py | # coding=utf-8
from __future__ import absolute_import, unicode_literals
import logging
import json
import copy
import logging.handlers
from . import logger_global_custom_data
from .third_party import logzero
from .mylogger import MultiprocessRotatingFileHandler
import inspect
# Additional logging levels.
# The commented-out entries are the standard stdlib levels, shown for context.
# CRITICAL = 50
# ERROR = 40
# WARNING = 30
# INFO = 20
VERBOSE = 15
# DEBUG = 10
TRACE = 8
NOISE = 6
LOWEST = 1
FILE_LOG_FORMAT = "%(asctime)s - %(filename)s:%(lineno)s - %(levelno)s %(levelname)s %(pathname)s %(module)s %(funcName)s %(created)f %(thread)d %(threadName)s %(process)d %(name)s - %(message)s"
# Matching aliyun SLS (log service) regex:
# (\d+-\d+-\d+\s\S+)\s-\s([^:]+):(\d+)\s+-\s+(\d+)\s+(\w+)\s+(\S+)\s+(\w+)\s+(\S+)\s+(\S+)\s+(\d+)\s+([\w-]+)\s+(\d+)\s+([\w\.]+)\s+-\s+(.*)
# FILE_LOG_FORMAT_WITH_CUSTOM_DATA = "%(asctime)s - %(filename)s:%(lineno)s - %(levelno)s %(levelname)s %(pathname)s %(module)s %(funcName)s %(created)f %(thread)d %(threadName)s %(process)d %(name)s - <data:%(data_json)s> - %(message)s"
# Format used when custom extra data is attached to the record
FILE_LOG_FORMAT_WITH_CUSTOM_DATA = "%(asctime)s - %(filename)s:%(lineno)s - %(levelno)s %(levelname)s %(pathname)s %(module)s %(funcName)s %(created)f %(thread)d %(threadName)s %(process)d %(name)s - %(data_json)s - %(message)s"
# Matching aliyun SLS regex, compatible with the no-data form as well:
# (\d+-\d+-\d+\s\S+)\s-\s([^:]+):(\d+)\s+-\s+(\d+)\s+(\w+)\s+(\S+)\s+(\w+)\s+(\S+)\s+(\S+)\s+(\d+)\s+([\w-]+)\s+(\d+)\s+([\w\.]+)\s+- (\{.+?\}|) ?-?\s*(.*)
_level_installed = False  # guard so the custom levels are registered only once
class FormatterWithCustomData(logging.Formatter):
    """Formatter that renders structured custom data attached to a record.

    When the record carries a ``data`` dict (or the module-wide
    ``logger_global_custom_data`` is set), the data is serialized to compact
    JSON as ``record.data_json`` and the record is rendered with
    ``FILE_LOG_FORMAT_WITH_CUSTOM_DATA``; otherwise the format given at
    construction time is used unchanged.
    """

    def __init__(self, fmt, *args, **kwargs) -> None:
        super().__init__(fmt, *args, **kwargs)
        # Remember the plain format: formatMessage temporarily swaps in the
        # with-data format and restores this afterwards.
        self._ori_fmt = self._style._fmt

    def formatMessage(self, record):
        # Merge the process-wide custom data into the record's own data;
        # values already on the record win (setdefault).
        if logger_global_custom_data:
            if not getattr(record, 'data', None):
                record.data = copy.copy(logger_global_custom_data)
            else:
                for _k, _v in logger_global_custom_data.items():
                    record.data.setdefault(_k, _v)
        if hasattr(record, 'data'):
            record.data_json = json.dumps(record.data, separators=(',', ':'), ensure_ascii=False)
            # Temporarily switch to the with-data format for this record only,
            # then restore the original so data-less records stay unaffected.
            self._style._fmt = FILE_LOG_FORMAT_WITH_CUSTOM_DATA
            result = super().formatMessage(record)
            self._style._fmt = self._ori_fmt
            return result
        else:
            return super().formatMessage(record)
def _install_custom_levels():
    """Register the extra level names with the logging module (idempotent)."""
    global _level_installed
    if _level_installed:
        return
    for value, name in ((VERBOSE, "VERBOSE"), (TRACE, "TRACE"),
                        (NOISE, "NOISE"), (LOWEST, "LOWEST")):
        logging.addLevelName(value, name)
    _level_installed = True
def _lower_level(*levels):
lowest = 0
for level in levels:
if not isinstance(level, int):
level = logging.getLevelName(level)
if level < lowest:
lowest = level
return lowest
def basicConfig(level=logging.INFO, color=False, handler=None, formatter=None,
                logfile=None, file_level=None, maxBytes=0, backupCount=0,
                file_format=FILE_LOG_FORMAT, multi_process=False,
                file_ensure_single_line=True,
                ):
    """Configure the root logger: a console handler plus, optionally, a
    rotating file handler.

    Args:
        level: level for the console handler (and the root logger when no
            separate ``file_level`` is given)
        color: colored console output (logzero formatter)
        handler: override for the console handler (default ``StreamHandler``)
        formatter: override for the console formatter
        logfile: log file path; file logging is skipped when falsy
        file_level: level of the file handler; when given, the root logger is
            lowered to the lesser of ``level`` and ``file_level`` so records
            reach the most verbose handler
        maxBytes / backupCount: rotation parameters for the file handler
        multi_process: use the multiprocess-safe rotating handler
        file_ensure_single_line: passed to the multiprocess handler only

    No-op when the root logger already has handlers.
    """
    _install_custom_levels()
    # NOTE: private logging API -- the same module-level lock the stdlib
    # uses to serialize handler configuration.
    logging._acquireLock()
    try:
        if len(logging.root.handlers) != 0:
            # Already configured elsewhere; do not stack more handlers.
            return
        handler = handler or logging.StreamHandler()
        formatter = formatter or logzero.LogFormatter(color=color)
        handler.setFormatter(formatter)
        logging.root.addHandler(handler)
        if logfile:
            if multi_process:
                file_handler = MultiprocessRotatingFileHandler(
                    logfile, maxBytes=maxBytes, backupCount=backupCount,
                    ensure_single_line=file_ensure_single_line,
                )
            else:
                file_handler = logging.handlers.RotatingFileHandler(
                    logfile, maxBytes=maxBytes, backupCount=backupCount)
            file_formatter = FormatterWithCustomData(file_format)
            file_handler.setFormatter(file_formatter)
            logging.root.addHandler(file_handler)
            if file_level is not None:
                file_handler.setLevel(file_level)
                # Root must be at least as verbose as its most verbose handler.
                _root_level = _lower_level(level, file_level)
                handler.setLevel(level)
                logging.root.setLevel(_root_level)
        if file_level is None:
            logging.root.setLevel(level)
    finally:
        logging._releaseLock()
def colorConfig(level=logging.INFO, handler=None, formatter=None, **kwargs):
    """Shortcut for :func:`basicConfig` with colored console output enabled."""
    kwargs["color"] = True
    basicConfig(level=level, handler=handler, formatter=formatter, **kwargs)
def _get_outframe_main(frame):
outframe = frame.f_back
return outframe.f_globals["__name__"]
def getLogzeroLogger(name=None, logfile=None, level=logging.NOTSET,
                     formatter=None, maxBytes=0, backupCount=0, fileLoglevel=None):
    """Thin wrapper around ``logzero.setup_logger``.

    The logger name defaults to the ``__name__`` of the calling module.
    """
    if not name:
        name = _get_outframe_main(inspect.currentframe())
    return logzero.setup_logger(
        name=name, logfile=logfile, level=level, formatter=formatter,
        maxBytes=maxBytes, backupCount=backupCount, fileLoglevel=fileLoglevel,
    )
class EnhancedLogger(logging.Logger):
    """Logger with extra low-verbosity levels and kwargs-as-structured-data.

    Any extra keyword arguments passed to a log call are coerced to
    JSON-friendly scalars and folded into ``record.data`` (rendered by
    ``FormatterWithCustomData``).
    """

    def verbose(self, msg, *args, **kwargs):
        """Level between DEBUG and INFO."""
        if self.isEnabledFor(VERBOSE):
            self._log(VERBOSE, msg, args, **kwargs)

    def trace(self, msg, *args, **kwargs):
        """One step below DEBUG."""
        if self.isEnabledFor(TRACE):
            self._log(TRACE, msg, args, **kwargs)

    def noise(self, msg, *args, **kwargs):
        """Two steps below DEBUG."""
        if self.isEnabledFor(NOISE):
            self._log(NOISE, msg, args, **kwargs)

    def lowest(self, msg, *args, **kwargs):
        """The lowest possible log level."""
        if self.isEnabledFor(LOWEST):
            self._log(LOWEST, msg, args, **kwargs)

    def _log(self, level, msg, args, exc_info=None,
             extra=None, stack_info=False, **kwargs):
        if not kwargs:
            super()._log(level, msg, args, exc_info, extra, stack_info)
            return
        extra = {} if extra is None else extra
        extra.setdefault('data', {})
        # Keep only JSON-serializable scalars; stringify everything else.
        normalized = {
            _k: _v if isinstance(_v, (str, int, float, bool)) or _v is None else str(_v)
            for _k, _v in kwargs.items()
        }
        extra['data'].update(normalized)
        super()._log(level, msg, args, exc_info, extra, stack_info)
def getLogger(name=None):
    """
    Args:
        name (str|int): auto-detected from the calling module when omitted
    Returns:
        EnhancedLogger: like a standard logger, with extra levels
    :rtype: EnhancedLogger
    """
    if not name:
        name = _get_outframe_main(inspect.currentframe())
    manager = logging.Logger.manager
    previous_cls = manager.loggerClass
    try:
        # Swap the factory class only for the duration of this lookup.
        manager.loggerClass = EnhancedLogger
        logger = logging.getLogger(name)  # type: EnhancedLogger
    finally:
        manager.loggerClass = previous_cls
    return logger  # type: EnhancedLogger
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,278 | aploium/my_utils | refs/heads/master | /requestfuzz/tests/test_rebuild_bare.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import pytest
from requestfuzz import FuzzableRequest
def _fmt_line_sep(bare, line_sep=b"\r\n"):
return line_sep.join(bare.splitlines(False))
def test_rebuild_bare1():
    """A simple GET request must survive a from_bare/to_bare round-trip."""
    raw = b"""GET / HTTP/1.1
Host: 11.22.33.44
User-Agent: HTTPie/0.9.9
Accept-Encoding: identity
Accept: */*
Connection: keep-alive
"""
    raw = _fmt_line_sep(raw)
    rebuilt = FuzzableRequest.from_bare(raw).to_bare()
    assert rebuilt == raw
def test_rebuild_bare2():
    """Round-trip a realistic browser GET request (many headers, cookies)."""
    bare = b"""GET / HTTP/1.1
Host: www.baidu.com
Connection: keep-alive
Cache-Control: max-age=0
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
Accept-Encoding: identity
Accept-Language: zh-CN,zh;q=0.8,en;q=0.6,it;q=0.4,es;q=0.2,cy;q=0.2
Cookie: __cfduid=aa; BAIDUID="bb:FG=1"; PSTM=cc; BIDUPSID=dd; MCITY=-%3A; ispeed_lsm=2; BD_HOME=0; BD_UPN=123253; BD_CK_SAM=1; PSINO=5; H_PS_PSSID=as12rf; BDORZ=FEEFerg; BDSVRTM=0
"""
    bare = _fmt_line_sep(bare)
    assert FuzzableRequest.from_bare(bare).to_bare() == bare
def test_rebuild_bare4():
    """Round-trip a POST with urlencoded body (no trailing newline).

    Content-Length: 26 matches the 26-byte body ``ip=127.0.0.1&Submit=Submit``.
    """
    bare = b"""\
POST /vulnerabilities/exec/ HTTP/1.1
Host: some.domain.com:2333
Connection: keep-alive
Content-Length: 26
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.2333.113 Safari/537.36
Content-Type: application/x-www-form-urlencoded
Accept: */*
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.8
Cookie: PHPSESSID=vvvvvvvvvvvv; security=low

ip=127.0.0.1&Submit=Submit"""
    bare = _fmt_line_sep(bare)
    assert FuzzableRequest.from_bare(bare).to_bare() == bare
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,279 | aploium/my_utils | refs/heads/master | /err_hunter/demo_filelog.py | #!/usr/bin/env python3
# coding=utf-8
import err_hunter

# Demo: console handler at INFO, rotating file handler at DEBUG
# (1 MiB per file, 5 backups kept).
err_hunter.basicConfig("INFO", logfile="file.log", file_level="DEBUG",
                       maxBytes=1024 * 1024, backupCount=5,
                       )

logger = err_hunter.getLogger()

logger.error("err")
logger.warning("warning")
logger.info("info")
logger.debug("debug, only appears in file.log")
logger.info("please see `file.log` for filelog")

# Emit enough records to exercise the size-based rotation several times.
for i in range(50000):
    logger.info("info %s", i)
    logger.debug("debug %s", i)
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,280 | aploium/my_utils | refs/heads/master | /requestfuzz/utils.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import six
import collections
from tldextract import TLDExtract
# Optional charset detector: prefer cchardet (faster), fall back to chardet.
# Fixed: bind the name to None when neither package is installed, so later
# uses raise a clear AttributeError inside their guarded try-blocks instead
# of an unexpected NameError on an undefined global.
try:
    import cchardet
except ImportError:
    try:
        import chardet as cchardet
    except ImportError:
        cchardet = None
def unicode_decode(content):
    r"""
    Try several encodings to decode binary content into unicode text.

    copy from `unicode_decode.py`

    Fixed: the bare ``except:`` clauses are narrowed to ``except Exception``
    so SystemExit / KeyboardInterrupt are no longer swallowed.

    :return: tuple(encoding, decoded unicode text)
    :rtype: (str, bytes)

    >>> unicode_decode("简体中文UTF8汉字".encode("utf8"))
    ('UTF-8', '简体中文UTF8汉字')
    >>> unicode_decode("简体中文GBK汉字".encode("gbk"))
    ('GB18030', '简体中文GBK汉字')
    >>> unicode_decode(b'\xfa\xfb\xfc')
    Traceback (most recent call last):
    ...
    UnicodeError: unable to decode b'\xfa\xfb\xfc'
    """
    # Most content is UTF-8: try it first.
    try:
        return "UTF-8", content.decode("UTF-8")
    except Exception:
        pass
    # GB18030 is a superset of GBK/GB2312, common for Chinese content.
    try:
        return "GB18030", content.decode("GB18030")
    except Exception:
        pass
    # Last resort: statistical detection (cchardet may be None or missing;
    # any failure here just falls through to the final UnicodeError).
    try:
        encoding = cchardet.detect(content)["encoding"]
        return encoding, content.decode(encoding)
    except Exception:
        pass
    raise UnicodeError("unable to decode {}".format(repr(content[:32])))
def ensure_unicode(content):
    """Coerce *content* to unicode text.

    None and already-unicode values pass through unchanged; bytes are
    decoded via :func:`unicode_decode`.
    """
    if content is None or isinstance(content, six.text_type):
        return content
    _, decoded = unicode_decode(content)
    return decoded
def make_netloc(host, scheme="http", port=None):
    """Build a URL netloc, omitting the port when it is the scheme default.

    :param host: hostname or IP string
    :param scheme: URL scheme; "http" and "https" have known default ports
    :param port: optional port number (falsy means "no explicit port")
    :return: "host" or "host:port"
    """
    # The original condition repeated ``not port`` three times; a small
    # default-port table expresses the same rule directly.
    default_ports = {"http": 80, "https": 443}
    if not port or port == default_ports.get(scheme):
        return host
    return "{}:{}".format(host, port)
def like_dict(obj):
    """Return True when *obj* behaves like a dict (mapping)."""
    # ``collections.Mapping`` was removed from the top-level namespace in
    # Python 3.10; resolve it via collections.abc when available (py3) while
    # still working on py2 where the ABCs live directly in ``collections``.
    mapping_type = getattr(collections, "abc", collections).Mapping
    if isinstance(obj, (dict, mapping_type)):
        return True
    # duck-typing fallback for custom mapping-ish objects
    return hasattr(obj, "__getitem__") and hasattr(obj, "items")
def like_list(obj):
    """Return True when *obj* behaves like a list, excluding strings.

    Note:
        No method is unique to list/tuple (as opposed to dict/str), i.e.

            attr = lambda x: set(dir(x))
            (attr([]) & attr(tuple())).difference(attr({}) | attr(""))

        is the empty set, so custom objects cannot be classified as
        list-like with full reliability; a duck-typing guess is used.
    """
    if isinstance(obj, (tuple, list)):
        return True
    elif isinstance(obj, six.string_types):
        return False
    # ``collections.Sequence`` was removed in Python 3.10; resolve via
    # collections.abc on py3, falling back to ``collections`` on py2.
    elif isinstance(obj, getattr(collections, "abc", collections).Sequence):
        return True
    # duck-typing fallback; narrowed from a bare ``except:``
    try:
        return hasattr(obj, "__getitem__") and hasattr(obj, "index")
    except Exception:
        return False
def is_ip_address(address):
    """Return True when *address* is a dotted-quad IPv4 string."""
    if not isinstance(address, six.string_types):
        return False
    octets = address.split(".")
    if len(octets) != 4:
        return False
    # every part must be a pure decimal number within the byte range
    return all(o.isdigit() and 0 <= int(o) <= 255 for o in octets)
def extract_root_domain(domain):
    """
    Return the registrable root domain of *domain*.
    # copied form w3af.core.data.parsers.doc.url.URL#get_root_domain
    """
    # An IP address has no 'root domain'
    if is_ip_address(domain):
        return domain
    # Building a TLDExtract loads the TLD suffix snapshot, which is
    # relatively expensive — cache one instance on the function object
    # instead of recreating it on every call.
    extract = getattr(extract_root_domain, "_extractor", None)
    if extract is None:
        extract = TLDExtract(fallback_to_snapshot=True)
        extract_root_domain._extractor = extract
    extract_result = extract(domain)
    return '%s.%s' % (extract_result.domain, extract_result.suffix)
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,281 | aploium/my_utils | refs/heads/master | /requestfuzz/csrf.py | #!/usr/bin/env python3
# coding=utf-8
"""CSRF工具箱"""
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import weakref
import re
import requests
import logging
from requests.exceptions import RequestException
from .datastructure import OrderedMultiDict
from .plugin import FzPluginBase
try:
from . import request
except:
pass
logger = logging.getLogger(__name__)
class UnableGetCSRFTokenError(Exception):
    """Raised when a usable CSRF token could not be obtained."""
class BaseCSRF(FzPluginBase):
    """Base plugin that detects and refreshes CSRF tokens in a request.

    Args:
        fz (request.FuzzableRequest): the associated request
        token_places (list[tuple[str,str]]):
            locations in the request that carry a token,
            e.g. [("data", "csrf"), ("headers", "token")]
    """
    # Lower-case substrings that mark a parameter name as a CSRF token.
    TOKEN_KEYWORDS = ["csrf", ]
    # Request sections scanned for token-like parameter names.
    POTENTIAL_PLACES = ["query", "data", "headers", "json"]

    def __init__(self, fz):
        """
        Args:
            fz (request.FuzzableRequest):
        """
        super(BaseCSRF, self).__init__(fz)
        self.token_places = self.find_token_places()
        self.new_token = None

    def on_init_complete(self):
        # Plugin hook: apply a fresh token as soon as the request is ready.
        self.apply()

    def get_new_token(self):  # TODO:
        """
        Obtain a usable token.
        Override this method in a subclass.
        """
        raise NotImplementedError

    @property
    def need_token(self):
        # True when the original request carries at least one token field.
        return bool(self.token_places)

    def find_token_places(self):
        """
        Check whether the original request *needs* a csrf_token,
        i.e. look for parameter names that contain a token keyword.

        Returns:
            set[tuple[str, str]]: pairs of (request section, key name)
        """
        token_places = set()
        for place in self.POTENTIAL_PLACES:
            dic = getattr(self.fz, place)  # type: OrderedMultiDict
            if not dic:
                continue
            for key in dic.keys():
                for keyword in self.TOKEN_KEYWORDS:
                    if keyword in key.lower():
                        # found a token-carrying field
                        token_places.add((place, key))
        return token_places

    def prepare(self, force=False):
        """Call get_new_token() to fetch a token (cached unless *force*)."""
        if self.new_token and not force:
            return
        try:
            self.new_token = self.get_new_token()
        except (UnableGetCSRFTokenError, RequestException) as e:
            logger.warning("unable to get csrf token for %s %s", self.fz.url, e)
            self.new_token = ""

    def write_token(self):
        """Write the fetched token back into the original request."""
        for (place, csrf_key) in self.token_places:
            # Roughly equivalent to ``self.fz.query[key] = token``, written
            # via getattr so every place can be handled uniformly.
            getattr(self.fz, place)[csrf_key] = self.new_token

    def apply(self):
        """Apply the CSRF token to the request."""
        if not self.need_token:
            # no token needed, nothing to do
            return
        self.prepare()
        self.write_token()

    def search_csrf_token(self, content):
        """Scrape a CSRF token out of an HTML/JS response body.

        Tries progressively looser patterns and returns the first match,
        or "" when nothing matches.
        """
        # First look for <input>-style tags whose name matches a known
        # token key from this request.
        for (csrf_position, csrf_key) in self.token_places:
            _regex = re.compile(
                r'(?i)<[^>]*name=[\"\']{1,1}%s[\"\']{1,1}[^>]*value\s*=\s*[\"\']{1,1}([a-zA-Z0-9\-\_]+)[\"\']{1,1}' % csrf_key)
            token = _regex.findall(content)
            if token:
                return token[0]
        # Generic csrf-named tags: name=...value, id=...value,
        # name=...content (meta tags), and the reversed attribute orders.
        _regex = re.compile(
            r'(?i)<[^>]*name=[\"\']{1,1}[a-zA-Z0-9\-\_\.]*csrf[a-zA-Z0-9\-\_\.]*[\"\']{1,1}[^>]*value\s*=\s*[\"\']{1,1}([a-zA-Z0-9\-\_]+)[\"\']{1,1}')
        token = _regex.findall(content)
        if token:
            return token[0]
        _regex = re.compile(
            r'(?i)<[^>]*id=[\"\']{1,1}[a-zA-Z0-9\-\_\.]*csrf[a-zA-Z0-9\-\_\.]*[\"\']{1,1}[^>]*value\s*=\s*[\"\']{1,1}([a-zA-Z0-9\-\_]+)[\"\']{1,1}')
        token = _regex.findall(content)
        if token:
            return token[0]
        _regex = re.compile(
            r'(?i)<[^>]*name=[\"\']{1,1}[a-zA-Z0-9\-\_\.]*csrf[a-zA-Z0-9\-\_\.]*[\"\']{1,1}[^>]*content\s*=\s*[\"\']{1,1}([a-zA-Z0-9\-\_]+)[\"\']{1,1}')
        token = _regex.findall(content)
        if token:
            return token[0]
        _regex = re.compile(
            r'(?i)<[^>]*value\s*=\s*[\"\']{1,1}([a-zA-Z0-9\-\_]+)[\"\']{1,1}[^>]*name=[\"\']{1,1}[a-zA-Z0-9\-\_\.]*csrf[a-zA-Z0-9\-\_\.]*[\"\']{1,1}')
        token = _regex.findall(content)
        if token:
            return token[0]
        _regex = re.compile(
            r'(?i)<[^>]*value\s*=\s*[\"\']{1,1}([a-zA-Z0-9\-\_]+)[\"\']{1,1}[^>]*id=[\"\']{1,1}[a-zA-Z0-9\-\_\.]*csrf[a-zA-Z0-9\-\_\.]*[\"\']{1,1}')
        token = _regex.findall(content)
        if token:
            return token[0]
        _regex = re.compile(
            r'(?i)<[^>]*content\s*=\s*[\"\']{1,1}([a-zA-Z0-9\-\_]+)[\"\']{1,1}[^>]*name=[\"\']{1,1}[a-zA-Z0-9\-\_\.]*csrf[a-zA-Z0-9\-\_\.]*[\"\']{1,1}')
        token = _regex.findall(content)
        if token:
            return token[0]
        # Then look for csrf-named JavaScript variable/parameter values.
        _regex = re.compile(
            r'(?i)[a-zA-Z0-9\-\_\.]*csrf[a-zA-Z0-9\-\_\.]*[\'\"]{0,1}\s*[\=\:\,]{1,1}\s*[\"\']{1,1}([a-zA-Z0-9\-\_]+)[\"\']{1,1}')
        token = _regex.findall(content)
        if token:
            return token[0]
        _regex = re.compile(
            r'(?i)[a-zA-Z0-9\-\_\.]*sec[a-zA-Z0-9\-\_\.]*token[a-zA-Z0-9\-\_\.]*[\'\"]{0,1}\s*[\=\:\,]{1,1}\s*[\"\']{1,1}([a-zA-Z0-9\-\_]+)[\"\']{1,1}')
        token = _regex.findall(content)
        if token:
            return token[0]
        _regex = re.compile(
            r'(?i)[a-zA-Z0-9\-\_]*token[a-zA-Z0-9\-\_]*[\'\"]{0,1}\s*[\=\:]{1,1}\s*[\"\'\,]{1,1}([a-zA-Z0-9\-\_]+)[\"\']{1,1}')
        token = _regex.findall(content)
        if token:
            return token[0]
        return ""
class GenericCSRF(BaseCSRF):
    """Generic CSRF implementation: re-fetches a related page over HTTP and
    scrapes a fresh token out of the response body."""

    def get_new_token(self):
        """
        Obtain a usable token.
        Override this method in a subclass.
        """
        # Browser-like headers, presumably to avoid bot-filtered responses.
        headers = {
            'accept-encoding': "gzip, deflate",
            'accept-language': "zh-CN,zh;q=0.8,en;q=0.6,it;q=0.4,es;q=0.2",
            'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
            'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        }
        csrf_token = ""
        # Try the referer page first, then url.without_path, then
        # url.without_query, stopping at the first page that yields a token.
        if 'referer' in self.fz.headers:
            resp = requests.get(self.fz.headers['referer'], headers=headers, cookies=self.fz.cookies, timeout=5)
            csrf_token = self.search_csrf_token(resp.text)
        if csrf_token == "":
            resp = requests.get(self.fz.url.without_path, headers=headers, cookies=self.fz.cookies, timeout=5)
            csrf_token = self.search_csrf_token(resp.text)
        if csrf_token == "":
            resp = requests.get(self.fz.url.without_query, headers=headers, cookies=self.fz.cookies, timeout=5)
            csrf_token = self.search_csrf_token(resp.text)
        if csrf_token == "":
            logger.error("CSRF not found, url=%s" % self.fz.url)
        return csrf_token
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,282 | aploium/my_utils | refs/heads/master | /disk_kv_storge/__init__.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals
import sys
import os
import json
import collections
import tempfile
# Registry of available storage-engine modules, keyed by engine NAME,
# plus a "best" alias pointing at the preferred engine.
engines = {}
best_engine = None
try:
    try:
        # package-relative import first, absolute import as fallback
        from . import leveldb_engine
    except (ImportError, ValueError):
        # noinspection PyUnresolvedReferences
        from disk_kv_storge import leveldb_engine
except ImportError:
    # NOTE(review): re-raising makes the whole package unimportable without
    # leveldb — presumably intentional while it is the only engine; confirm.
    raise
else:
    best_engine = leveldb_engine
    engines[leveldb_engine.NAME] = leveldb_engine
engines["best"] = best_engine
# String types accepted for engine names (py2 needs both str and unicode).
if sys.version_info[0] == 2:
    # noinspection PyUnresolvedReferences
    str_type = (str, unicode)
else:
    str_type = str
def _text_open(file,mode):
if sys.version_info[0] == 2:
return open(file, mode)
else:
return open(file, mode, encoding="utf8")
class BaseDiskKV(collections.MutableMapping):
    """Dict-like key/value store persisted on disk via a pluggable engine.

    Subclasses customise (de)serialisation through the key_encode /
    key_decode / value_encode / value_decode hooks declared at the bottom
    of the class (None means "no transformation").

    NOTE(review): ``collections.MutableMapping`` was removed from the
    top-level ``collections`` namespace in Python 3.10 — confirm the
    supported Python range.
    """

    def __init__(self, db_folder=None, engine=None, auto_delete=None, block_cache_size=8 * (2 << 20)):
        """
        Args:
            db_folder: storage directory; a temp dir is created when None
            engine: engine module or registered engine name; when None,
                falls back to the engine recorded in meta.json, then to
                the best available engine
            auto_delete: remove the whole folder on __del__; defaults to
                True only for auto-created temp folders
            block_cache_size: passed through to the engine's open()
        """
        if db_folder is None:
            self.db_folder = tempfile.mkdtemp(prefix="{}_".format(self.__class__.__name__))
            if auto_delete is None:
                auto_delete = True
        else:
            self.db_folder = db_folder
            if auto_delete is None:
                auto_delete = False
        self.auto_delete = auto_delete
        if not os.path.exists(self.db_folder):
            os.makedirs(self.db_folder)
        self._load_meta()
        # engine priority: explicit argument > meta.json > best available
        if engine is None and "engine" in self.meta:
            engine = self.meta.get("engine")
        if engine is None:
            engine = engines["best"]
        elif isinstance(engine, str_type):
            engine = engines[engine]
        self.engine = engine
        self.meta["engine"] = self.engine.NAME
        if self.meta.get("data_path"):
            self.data_path = self.meta["data_path"]
        else:
            self.data_path = os.path.join(self.db_folder, "data")
        self.db = engine.open(self.data_path, block_cache_size=block_cache_size)
        self._save_meta()

    def _load_meta(self, meta_file=None):
        """Load meta.json from the db folder (empty dict when missing)."""
        if meta_file is None:
            meta_file = os.path.join(self.db_folder, "meta.json")
        if os.path.exists(meta_file):
            meta = json.load(_text_open(meta_file, "r"))
        else:
            meta = {}
        self.meta = meta
        return meta

    def _save_meta(self, meta_file=None):
        """Persist self.meta to meta.json."""
        if meta_file is None:
            meta_file = os.path.join(self.db_folder, "meta.json")
        json.dump(self.meta, _text_open(meta_file, "w"), indent=4)

    def rawget(self, key):
        # engine-level get, bypassing the key/value codecs
        return self.engine.get(self.db, key)

    def __getitem__(self, key):
        if self.key_encode is not None:
            key = self.key_encode(key)
        value = self.rawget(key)
        if value is None:
            # engine returns None for missing keys; surface as KeyError
            raise KeyError("key {} not exist".format(key))
        if self.value_decode is not None:
            return self.value_decode(value)
        else:
            return value

    def get(self, key, default=None):
        """dict.get-style lookup with a default."""
        try:
            value = self[key]
        except KeyError:
            return default
        return value

    def put(self, key, value):
        """Store a key/value pair, applying the encode hooks."""
        if self.key_encode is not None:
            key = self.key_encode(key)
        if self.value_encode is not None:
            value = self.value_encode(value)
        return self.engine.put(self.db, key, value)

    def delete(self, key, decode=True):
        # NOTE(review): the ``decode`` flag actually controls key *encoding*
        # before deletion — the name looks historical.
        if self.key_encode is not None and decode:
            key = self.key_encode(key)
        return self.engine.delete(self.db, key)

    def keys(self, decode=True):
        """Iterate keys; set decode=False to get raw engine keys."""
        if self.key_decode is not None and decode:
            return (self.key_decode(x) for x in self.engine.keys(self.db))
        else:
            return self.engine.keys(self.db)

    def values(self):
        """Iterate decoded values."""
        if self.value_decode is not None:
            return (self.value_decode(x) for x in self.engine.values(self.db))
        else:
            return self.engine.values(self.db)

    def items(self):
        """Iterate decoded (key, value) pairs."""
        if self.key_decode is not None:
            if self.value_decode is not None:
                return ((self.key_decode(k), self.value_decode(v)) for k, v in self.engine.items(self.db))
            else:
                return ((self.key_decode(k), v) for k, v in self.engine.items(self.db))
        else:
            if self.value_decode:
                return ((k, self.value_decode(v)) for k, v in self.engine.items(self.db))
            else:
                return self.engine.items(self.db)

    def close(self):
        """Close the underlying engine handle."""
        return self.engine.close(self.db)

    __iter__ = keys
    __setitem__ = put
    __delitem__ = delete

    def __len__(self):
        # engines expose no count API; count by iterating raw keys
        count = 0
        for _ in self.keys(decode=False):
            count += 1
        return count

    def __contains__(self, item):
        try:
            value = self[item]
        except KeyError:
            return False
        else:
            return value is not None

    def __del__(self):
        # best-effort cleanup of auto-created temp databases
        if self.auto_delete:
            self.close()
            del self.db
            import shutil
            shutil.rmtree(self.db_folder)

    # serialisation hooks; None means "no transformation"
    key_encode = None
    key_decode = None
    value_encode = None
    value_decode = None
class DiskKV(BaseDiskKV):
    """Disk KV store whose keys and values are plain bytes.

    >>> import tempfile
    >>> tempdb_path = tempfile.mkdtemp()
    >>> db = DiskKV(tempdb_path)
    >>> db.put(b"cat",b"dog")
    >>> db.put(b"cat1",b"dog1")
    >>> db.put(b"cat2",b"dog2")
    >>> db.put(b"cat3",b"dog3")
    >>> assert db.get(b"cat1") == b'dog1'
    >>> assert db.get(b"cat2") == b'dog2'
    >>> db.put(b"cat3",b"monkey")
    >>> assert db.get(b"cat3") == b'monkey'
    >>> assert frozenset([b"cat",b"cat1",b"cat2",b"cat3"]) == frozenset(x for x in db.keys())
    >>> assert frozenset([b"dog",b"dog1",b"dog2",b"monkey"]) == frozenset(x for x in db.values())
    >>> assert {b"cat":b"dog",b"cat1":b"dog1",b"cat2":b"dog2",b"cat3":b"monkey"} == {k:v for k,v in db.items()}
    >>> db.close()
    >>> del db
    >>>
    >>> db2 = DiskKV(tempdb_path)
    >>> assert {b"cat":b"dog",b"cat1":b"dog1",b"cat2":b"dog2",b"cat3":b"monkey"} == {k:v for k,v in db2.items()}
    """
    # Normalize whatever the engine yields (presumably buffer-like objects)
    # to real bytes — TODO confirm against the engine's return types.
    key_decode = bytes
    value_decode = bytes
try:
from .disk_timeoutdict import DiskTimeoutDict
except (ImportError, ValueError):
# noinspection PyUnresolvedReferences
from disk_kv_storge.disk_timeoutdict import DiskTimeoutDict
try:
from .jsondiskkv import JsonDiskKV
except (ImportError, ValueError):
# noinspection PyUnresolvedReferences
from disk_kv_storge.jsondiskkv import JsonDiskKV
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,283 | aploium/my_utils | refs/heads/master | /requestfuzz/recursive_parse.py | # coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import six
import collections
import json
import string
import copy
import cgi
import weakref
import base64
import re
from io import BytesIO
if six.PY3:
from urllib import parse
else:
from future.backports.urllib import parse
from .datastructure import QueryDict, to_querydict
from . import utils
__all__ = ["BaseNode", "FormNode", "JSONNode", "JSONPNode", "PlainNode",
"UrlEncodeNode", "Base64EncodeNode", "load", "ENABLED_NODES"]
def _is_json_iterable(obj):
if isinstance(obj, six.string_types) \
or isinstance(obj, six.integer_types) \
or obj in (True, False, None):
return False
elif utils.like_dict(obj) or utils.like_list(obj):
return True
else:
raise TypeError("type: {} is not an iterable json type".format(type(obj)))
def _key_concat(parent, key):
"""
Args:
parent (tuple):
key (str):
Returns:
tuple:
"""
if not parent:
return key
else:
if isinstance(key, list):
key = tuple(key)
elif isinstance(key, six.string_types):
key = (key,)
return parent + key
def recursive_iter(js, parent=None):
    """Yield (abs_key, value) pairs for one level of a json structure.

    Scalars yield a single (None, value) pair; dicts yield their items and
    lists yield their enumerated elements, each key prefixed with *parent*.
    """
    if not _is_json_iterable(js):
        yield None, js
        return
    if utils.like_dict(js):
        for key, value in js.items():
            # absolute key path, e.g. "foo.bar.cat"-style tuples
            yield _key_concat(parent, key), value
    elif utils.like_list(js):
        for idx, value in enumerate(js):
            yield _key_concat(parent, "{}".format(idx)), value
    else:
        # unreachable: _is_json_iterable has already vetted the type
        raise TypeError("type: {} is not an iterable json type".format(type(js)))
def parse_multipart(data, content_type):
    """
    Args:
        data (bytes): the raw multipart body
        content_type: the Content-Type header value, which carries the
            boundary needed to split the parts, typically like:
            multipart/form-data; boundary=----WebKitFormBoundaryEW35oPYWK6qwibcP
    Returns:
        dict[str, cgi.FieldStorage] | QueryDict: key-value pairs; file
        fields keep their FieldStorage, plain fields are reduced to values
    """
    fs = cgi.FieldStorage(
        fp=BytesIO(data),
        environ={
            # required by cgi, but otherwise unused
            "QUERY_STRING": "",
            # must be POST so cgi parses the body
            "REQUEST_METHOD": "POST",
            # must contain the boundary
            "CONTENT_TYPE": content_type,
            "CONTENT_LENGTH": len(data),
        },
        keep_blank_values=True,
    )
    multipart = QueryDict()
    for field in (fs.list or []):  # type: cgi.FieldStorage
        if field.filename:
            multipart[field.name] = field
        else:
            multipart[field.name] = field.value
    return multipart
def split_multipart(multipart):
    """
    Split a multipart dict into plain parameters and uploaded files.
    Args:
        multipart(dict[str, str|cgi.FieldStorage]): parsed multipart dict
    Returns:
        tuple[QueryDict, dict[str, cgi.FieldStorage]]:
            (form, files) — the plain form parameters and the files
    """
    form, files = QueryDict(), QueryDict()
    for key, value in multipart.items():
        bucket = files if isinstance(value, cgi.FieldStorage) else form
        bucket[key] = value
    return form, files
@six.python_2_unicode_compatible
class BaseNode(object):
    """
    A node tree over a (possibly nested/encoded) request payload.

    Args:
        parent(BaseNode|None): the parent node
        children(list[BaseNode]):
            child nodes.  None means child generation has not run yet;
            a list (possibly empty) means it has — this enables lazy
            child computation.  Direct modification is allowed, e.g.
            deleting unwanted nodes.
    Methods:
        text(str): property, the textual form of the whole (sub)tree
    """
    type = "base"

    def __init__(self, data,
                 parent=None, text=None,
                 key=None, index_in_parent=None):
        """
        Args:
            data(QueryDict): the parsed data
            parent(BaseNode): the parent node
            key(str): this node's key name inside its parent
        """
        self.data = data
        self.key = key if parent else "<root>"
        self._index_in_parent = index_in_parent
        self._text_cache = text or None
        self.children = None  # type: list[BaseNode]
        self.parent = parent

    def refresh_children(self):
        """
        Compute and generate child nodes from data — the inverse of
        refresh_data():  data --> children
        Note: only one level of children is generated; rebuilding the
        whole tree requires gen_tree().
        """
        raise NotImplementedError

    def refresh_data(self):
        """
        Regenerate data from children — the inverse of refresh_children():
        children --> data
        Applies child modifications back onto this node; after modifying
        children, call .refresh_data() on the root to propagate.
        The operation recurses depth-first.
        """
        raise NotImplementedError

    def rebuild_text(self):
        """Regenerate the text representation from data."""
        raise NotImplementedError

    @property
    def text(self):
        # lazily (re)build and cache the textual form
        if self._text_cache is None:
            self.rebuild_text()
        return self._text_cache

    @text.setter
    def text(self, value):
        # only leaf (PlainNode) text may be assigned directly
        if not isinstance(self, PlainNode):
            raise NotImplementedError
        self.data = value
        self.rebuild_text()
        # propagate the change upwards through every ancestor
        parent_node = self.parent
        while parent_node is not None:
            parent_node.refresh_data()
            parent_node.rebuild_text()
            parent_node = parent_node.parent

    @property
    def index_in_parent(self):
        """
        Return this node's index inside its parent's children.
        Returns -1 when absent or when there is no parent.
        If never set explicitly, the first access discovers the index
        automatically; it is cached in self._index_in_parent.
        Returns:
            int
        """
        if self.parent is None:
            return -1
        if self._index_in_parent is None:
            # locate our own position inside the parent
            self._index_in_parent = self.parent.children.index(self)
        return self._index_in_parent

    @index_in_parent.setter
    def index_in_parent(self, value):
        self._index_in_parent = value

    @property
    def depth(self):
        # the root is at depth 0
        if self.parent is None:
            return 0
        else:
            return self.parent.depth + 1

    @property
    def root(self):
        """Return the root node of this node's tree."""
        node = self
        while node.parent is not None:
            node = node.parent
        return node

    def reload(self, value, key=NotImplemented, plain=True):
        """Replace this node with a freshly-built node for *value* and
        propagate the rebuilt data/text up to the root.

        Args:
            value: new payload for the node
            key: new key name (defaults to the current key)
            plain: when True, build a PlainNode; otherwise auto-detect
                the node type via find_proper_node
        Returns:
            BaseNode: the replacement node
        """
        if key is NotImplemented:
            key = self.key
        if not plain:
            factory = find_proper_node
        else:
            factory = PlainNode
        new_node = factory(
            value, key=key, parent=self.parent,
            index_in_parent=self.index_in_parent,
        )
        new_node.gen_tree()
        # splice the new node into the parent and propagate upwards
        current_node = new_node
        parent_node = new_node.parent
        while parent_node is not None:
            parent_node.children[current_node.index_in_parent] = current_node
            parent_node.refresh_data()
            parent_node.rebuild_text()
            current_node = parent_node
            parent_node = parent_node.parent
        return new_node

    def copy(self):
        """Produce a copy of this node and the tree below it."""
        new = copy.copy(self)
        new.gen_tree()
        return new

    def fork_tree(self, value, **kwargs):
        """
        Produce a copy of the whole tree and apply the modification to the
        corresponding node inside the new tree; the original tree is left
        untouched.
        Returns:
            BaseNode: the root of the new tree
        """
        new_root = self.root.copy()
        # modify the node in the new tree that mirrors this one
        my_mirror = new_root.get_by_abskey(self.abskey)
        my_mirror.reload(value, **kwargs)
        return new_root

    def gen_tree(self):
        """Compute and generate the whole tree —
        equivalent to generating children recursively.
        """
        self.refresh_children()
        self.rebuild_text()
        for child in self.children:
            child.gen_tree()

    def get_by_abskey(self, abskey):
        """Fetch a descendant node in the tree by its abskey."""
        if self.abskey == abskey:
            return self
        for node in self.children:
            # descend into the child whose abskey is a prefix of the target
            if abskey[:len(node.abskey)] == node.abskey:
                return node.get_by_abskey(abskey)
        raise KeyError("key {} not found".format(abskey))

    def iter_all_leaves(self):
        """
        Breadth-first iteration over all leaf nodes in the tree.
        Yields:
            PlainNode: each leaf; every leaf is always a PlainNode
        """
        for node in self.iter_tree():
            if isinstance(node, PlainNode):
                yield node

    def iter_tree(self):
        """
        Breadth-first iteration over the whole tree.
        Yields:
            BaseNode: each node; all leaf nodes are always PlainNode
        """
        if self.children is None:
            # lazily build the tree
            self.gen_tree()
        # pycharm: the warning here is a pycharm bug, same below
        for child in self.children:
            yield child
        for child in self.children:
            # equivalent to `yield from child.iter_tree()`,
            # but py2 does not support that syntax
            for ch in child.iter_tree():
                yield ch

    @property
    def abskey(self):
        """
        Return the absolute key path relative to the root, as a tuple.
        Examples:
            ("<root>", "foo#1", "bar#7", "#3")
        """
        if self.parent is None:
            return ("<root>",)
        else:
            return _key_concat(
                self.parent.abskey,
                "{}#{}".format(self.key, self.index_in_parent)
            )  # , escape=False)

    def __getitem__(self, key):
        # linear scan of direct children by key name
        for child in self.children:
            if child.key == key:
                return child
        raise KeyError("key {} not found".format(key))

    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def get(self, key, default=None):
        """dict.get-style child lookup with a default."""
        try:
            return self[key]
        except KeyError:
            return default

    def __iter__(self):
        return iter(self.iter_tree())

    def __str__(self):
        return "{klass}<key={key} parent={parent} depth={depth} data={data}>".format(
            klass=self.__class__.__name__,
            key=repr(self.key),
            parent=repr(self.parent.key) if self.parent else None,
            depth=self.depth,
            data=repr(self.data),
        )

    __repr__ = __str__

    @classmethod
    def load(cls, data, **kwargs):
        """
        Try to load data.
        Returns:
            None|BaseNode:
                None when the data is not valid for this node type
                (expected load failures do not raise);
                otherwise an instance of this class.
        """
        return cls(data, **kwargs)
class FormNode(BaseNode):
    """Node for urlencoded form payloads such as ``a=1&b=2``."""
    type = "form"

    def refresh_children(self):
        self.children = []
        for key, data in self.data.items():
            child = find_proper_node(data, key=key, parent=self)
            self.children.append(child)

    def refresh_data(self):
        data = [(child.key, child.text) for child in self.children]
        self.data = to_querydict(data)

    def rebuild_text(self):
        self._text_cache = parse.urlencode(self.data)

    @classmethod
    def load(cls, data, **meta):
        """
        Examples:
            >>> form_str = r'cat=233&foo=bar'
            >>> node = FormNode.load(form_str)
            >>> node.data
            QueryDict([('cat', '233'), ('foo', 'bar')])
            >>> node.children()
            (ChildNode(key='cat', data='233', depth=1),
            (ChildNode(key='foo', data='bar', depth=1))
        """
        if not isinstance(data, six.string_types):
            return None
        try:
            data = utils.ensure_unicode(data)
        except:
            return None
        # accepted when it contains both "=" and "&", or exactly one "="
        # ("and" binds tighter than "or" in this condition)
        if not ("=" in data
                and "&" in data
                or data.count("=") == 1
                ):
            return None
        data = utils.ensure_unicode(data)
        try:
            query = to_querydict(data)
        except:
            return None
        else:
            return super(FormNode, cls).load(query, text=data, **meta)
class JSONNode(BaseNode):
    """Node whose payload is a JSON structure (dict or list)."""
    type = "json"

    def refresh_children(self):
        self.children = []
        for key, data in recursive_iter(self.data):
            child = find_proper_node(data, key=key, parent=self)
            self.children.append(child)

    def refresh_data(self):
        # rebuild the dict/list from the children, keeping nested JSONNode
        # data as structures and other children as their text form
        if utils.like_dict(self.data):
            self.data = {}
            for child in self.children:
                if isinstance(child, JSONNode):
                    self.data[child.key] = child.data
                else:
                    self.data[child.key] = child.text
        else:
            self.data = []
            for child in self.children:
                if isinstance(child, JSONNode):
                    self.data.append(child.data)
                else:
                    self.data.append(child.text)

    def rebuild_text(self):
        self._text_cache = json.dumps(self.data, sort_keys=False, ensure_ascii=False)

    @classmethod
    def load(cls, data, **meta):
        """
        Examples:
            >>> js_str = r'{"cat":"dog","x":["1","2",{"x":false}]}'
            >>> node = JSONNode.load(js_str)
            >>> node.data
            {'cat': 'dog', 'x': ['1', '2', {'x': False}]}
            >>> node.children()
            (ChildNode(key='cat', data='dog', depth=1),
             ChildNode(key=0, data='1', depth=1),
             ChildNode(key=1, data='2', depth=1),
             ChildNode(key='x', data=False, depth=1))
        """
        # when a string was passed in
        if isinstance(data, six.string_types):
            try:
                data = utils.ensure_unicode(data)
            except:
                return None
            data = data.strip()  # strip surrounding whitespace
            # cheap pre-checks before attempting a real parse
            if '"' not in data:
                return None
            if not (data.startswith("{") and data.endswith("}") and ":" in data
                    or data.startswith("[") and data.endswith("]")
                    ):
                return None
            # probably json — try to parse it
            data = utils.ensure_unicode(data)
            try:
                data_json = json.loads(data)
            except:
                return None
        elif utils.like_dict(data) or utils.like_list(data):
            # already a json structure, no further processing
            data_json = data
        else:
            return None  # unknown format
        return super(JSONNode, cls).load(data_json, **meta)
class JSONPNode(JSONNode):
    """JSON wrapped in a JSONP callback: ``callback({...})``."""
    type = "jsonp"
    # characters allowed in the callback name before the "("
    _JSONP_PREFIX_CHARS = set(string.ascii_letters + string.digits + "_")

    def __init__(self, data, prefix=None, suffix=None, **kwargs):
        # prefix/suffix are the "callback(" and ")" wrappers around the json
        self.prefix = prefix
        self.suffix = suffix
        super(JSONPNode, self).__init__(data, **kwargs)

    def rebuild_text(self):
        self._text_cache = "{}{}{}".format(
            self.prefix,
            json.dumps(self.data, sort_keys=False, ensure_ascii=False),
            self.suffix
        )

    @classmethod
    def load(cls, data, **meta):
        """
        Examples:
            >>> jp_str = '_callback({"cat":"dog","x":["1","2",{"x":false}]})'
            >>> node = JSONPNode.load(jp_str)
            >>> node.data
            {'cat': 'dog', 'x': ['1', '2', {'x': False}]}
        """
        if not isinstance(data, six.string_types):
            return None
        try:
            data = utils.ensure_unicode(data)
        except:
            return None
        data = data.strip()
        if not data.endswith(")") and not data.endswith(");"):
            return None
        # verify the callback(.....) shape
        lpos = data.find("(")
        if lpos == -1:
            return None
        if set(data[:lpos]).difference(cls._JSONP_PREFIX_CHARS):
            return None
        rpos = data.rfind(")")
        json_str = data[lpos + 1:rpos]  # the json body inside the jsonp
        meta["prefix"] = data[:lpos + 1]
        meta["suffix"] = data[rpos:]
        return super(JSONPNode, cls).load(json_str, **meta)
class UrlEncodeNode(BaseNode):
    """Node whose text is a urlencoded (percent-escaped) string."""
    type = "urlencode"

    def refresh_data(self):
        # data mirrors the decoded child's current text
        self.data = self.children[0].text

    def rebuild_text(self):
        self._text_cache = parse.quote(self.data)

    def refresh_children(self):
        child = find_proper_node(self.data, key="urlencode", parent=self)
        self.children = [child]

    @classmethod
    def load(cls, data, **kwargs):
        if not isinstance(data, six.string_types):
            return None
        try:
            data = utils.ensure_unicode(data)
            # must contain at least one escape and round-trip losslessly
            if '%' not in data:
                return None
            if parse.quote(parse.unquote(data)) != data:
                return None
        except:
            return None
        return super(UrlEncodeNode, cls).load(
            parse.unquote(data), text=data, **kwargs)
class Base64EncodeNode(BaseNode):
    """Node whose text is a base64-encoded string."""
    type = "base64"

    def refresh_data(self):
        # data mirrors the decoded child's current text
        self.data = self.children[0].text

    def rebuild_text(self):
        raw = self.data
        if isinstance(raw, six.text_type):  # sentry #1914
            raw = raw.encode("utf-8")
        encoded = base64.b64encode(raw)
        if isinstance(encoded, six.binary_type):
            encoded = encoded.decode("ascii")
        self._text_cache = encoded

    def refresh_children(self):
        child = find_proper_node(self.data, key="base64", parent=self)
        self.children = [child]

    @classmethod
    def load(cls, data, **kwargs):
        if not isinstance(data, six.string_types):
            return None
        try:
            data = utils.ensure_unicode(data)
        except:
            return None
        # quick structural check: valid base64 alphabet and padding
        if not re.match(r'^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)$', data):
            return None
        try:
            # must survive a decode/encode round trip unchanged
            reencoded = base64.b64encode(base64.b64decode(data))
            if six.PY3:
                reencoded = reencoded.decode("ascii")
            if reencoded != data:
                return None
            decoded = base64.b64decode(data)
            if six.PY3:
                decoded = decoded.decode("utf-8")
            # TODO could be further validated as real base64 content
        except Exception:
            return None
        return super(Base64EncodeNode, cls).load(decoded, text=data, **kwargs)
class PlainNode(BaseNode):
    """Leaf node holding plain, undecodable text."""
    type = "plain"

    def refresh_data(self):
        # data and text are the same thing for plain text
        pass

    def rebuild_text(self):
        self._text_cache = self.data

    def refresh_children(self):
        # a PlainNode is always a leaf
        self.children = []

    @classmethod
    def load(cls, data, **kwargs):
        # accepts anything -- used as the last-resort fallback node type
        return super(PlainNode, cls).load(data, **kwargs)
# Node classes tried in order by find_proper_node(); the first whose
# .load() accepts the data wins, so more specific formats come first.
ENABLED_NODES = [
    Base64EncodeNode,
    UrlEncodeNode,
    JSONPNode,
    JSONNode,
    FormNode,
    PlainNode,  # fallback, PlainNode always matches successfully
]
def find_proper_node(
        data,
        key=None, parent=None, index_in_parent=None,
        enabled_nodes=ENABLED_NODES,
):
    """Try each node class in order and return the first that loads *data*.

    Raises:
        ValueError: if no enabled node class accepts the data (cannot
            happen with the default list, which ends in PlainNode).
    """
    for candidate in enabled_nodes:
        parsed = candidate.load(
            data,
            key=key, parent=parent,
            index_in_parent=index_in_parent,
        )
        if parsed is not None:
            return parsed
    # unreachable with the default node list (PlainNode always matches)
    raise ValueError("unable to decode data: {}".format(data))
def load(data, recursive=True, **meta):
    """Parse *data* and return the root of the decoded node tree.

    Supported formats: Forms / JSON / JSONP (XML and BASE64 pending).

    Args:
        data (str): the string to parse
        recursive (bool): when False, only the top-most node is decoded
        meta (dict[str, int|str]): optional node metadata, rarely needed

    Returns:
        BaseNode: the root node of the parsed tree
    """
    root_node = find_proper_node(data, **meta)
    if not recursive:
        return root_node
    root_node.gen_tree()
    return root_node
# ---------- 下面是一些方便的辅助函数 -------------
def is_json_or_jsonp(text):
    """Classify *text* as "json", "jsonp", or None when it is neither.

    :param text: candidate string
    :returns: "jsonp" for callback-wrapped JSON, "json" for bare JSON,
        None when the text parses as neither.
    """
    try:
        node = find_proper_node(
            text, enabled_nodes=[JSONPNode, JSONNode]
        )
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; find_proper_node raises ValueError when
        # neither node class matches.
        return None
    return "jsonp" if isinstance(node, JSONPNode) else "json"
def main():
    """Manual smoke test: build a nested jsonp/json/urlencoded payload,
    parse it, mutate a few leaves, and print the regenerated text."""
    js = {
        "monkey": "cat",
        "aform": parse.urlencode([
            ("choice", 17),
            ("choice", 18),
            ("choice", parse.quote("test+1fsf")),
            ("choice", "ZnNmc2Q="),
            ("json", json.dumps({"chained": "json2", "aaa": "bbb"}))
        ]),
        "foo": ["b", "a", {"b": "c", "d": "e"}, "this=is&a=form"],
        "bar": False,
        "ttt": None,
        "key-with.a-dot": "value-with.a-dot",
        "中文": "中文"
    }
    # wrap the JSON payload in a jsonp callback
    nested = r"""_callback({})""".format(json.dumps(js))
    print("ori:", nested, "\n\n")
    root_node = load(nested)
    # walk the whole tree, printing every node and mutating a few leaves
    for node in root_node.iter_tree():
        print(" " * node.depth * 4,
              "type:", node.type,
              "| key:", repr(node.key),
              "| abskey:", repr(node.abskey),
              "| data:", repr(node.data),
              "| text:", repr(node.text),
              "| depth:", node.depth,
              "| index_in_parent:", node.index_in_parent
              )
        if node.text == "17":
            node.reload("FFFFFF")
        if node.text == 'b':
            node.data = "IIIIII"
            new_node = node.reload("IIIIII")
        if node.text == 'is':
            node.data = "RRRRRR"
            node.reload("RRRRRR")
        if node.data == 'value-with.a-dot':
            node.data = "EEEEEE"
            # replace this leaf with a whole JSON list payload
            new_node = node.reload('''["IIIIII", "a", {"b": "c", "d": "e"}, "this=RRRRRR&a=form"]''')
            print(new_node.key)
            print(new_node.abskey)
            print(new_node.text)
    print("gen:", root_node.text, "\n\n")
    # overwrite every leaf's text and show the regenerated output
    for node in root_node.iter_all_leaves():
        print(" " * node.depth * 4,
              "type:", node.type,
              "| key:", repr(node.key),
              "| abskey:", repr(node.abskey),
              "| data:", repr(node.data),
              "| text:", repr(node.text),
              "| depth:", node.depth)
        node.text = "firesun"
    print("gen:", root_node.text, "\n\n")
    # reload every node in the tree with the same literal
    for node in root_node.iter_tree():
        print(" " * node.depth * 4,
              "type:", node.type,
              "| key:", repr(node.key),
              "| abskey:", repr(node.abskey),
              "| data:", repr(node.data),
              "| text:", repr(node.text),
              "| depth:", node.depth)
        node.reload("firesun")
    print("gen:", root_node.text, "\n\n")

if __name__ == '__main__':
    main()
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,284 | aploium/my_utils | refs/heads/master | /calling_statistic.py | #!/usr/bin/env python3
# coding=utf-8
"""
调用速率统计
支持py3与py2
"""
from __future__ import print_function, division
import time
import functools
import os
# ``Callable`` moved to ``collections.abc`` (and was removed from
# ``collections`` in Python 3.10); it is only referenced in the docstring
# type hints below, so a failed import is tolerable.
try:
    from collections import Callable
except ImportError:  # BUGFIX: was a bare `except:` -- catch only ImportError
    pass
def calling_static(period=1.0, printer=print, timer=None,
                   max_qps=None, qps_resolution=0.05):
    """
    Decorator factory: periodically print call-rate statistics for the
    wrapped function, and optionally throttle it to ``max_qps``.

    :param period: printing period in seconds
    :type period: float
    :param printer: print function
    :type printer: Callable
    :param timer: clock function; defaults to perf_counter (time.time on py2)
    :type timer: Callable
    :param max_qps: max calls per second, enforced with sleep
    :type max_qps: int
    :param qps_resolution: sampling interval for QPS accounting, in seconds
    :type qps_resolution: float
    """
    def dec(func):
        if timer is None:
            try:
                _timer = time.perf_counter
            except AttributeError:  # py2 has no time.perf_counter
                _timer = time.time
        else:
            _timer = timer
        _record = {"checkpoint_spd": _timer(), "count_spd": 0, "total": 0}
        if max_qps:
            # BUGFIX: count_qps is a call counter and must start at 0.
            # It was previously initialised to a timestamp, which made the
            # throttle condition fire immediately on the first call.
            _record.update({"checkpoint_qps": _timer(), "count_qps": 0})
        start_time = _record["checkpoint_spd"]  # type: float

        @functools.wraps(func)
        def _func(*args, **kwargs):
            # -------
            result = func(*args, **kwargs)
            # -------
            now = _timer()  # type: float
            _record["count_spd"] += 1
            if max_qps:
                _record["count_qps"] += 1
            _record["total"] += 1
            # print speed statistics once per `period`
            if now - _record["checkpoint_spd"] > period:
                printer("Timer:func:%s T+%0.3fs Tot:%d Spd:%0.2f/s PID:%d" % (
                    func.__name__, now - start_time, _record["total"],
                    _record["count_spd"] / (now - _record["checkpoint_spd"]),
                    os.getpid()
                ))
                _record["checkpoint_spd"] = now
                _record["count_spd"] = 0
            if max_qps:
                # throttle: more calls in this window than max_qps allows
                if (now - _record["checkpoint_qps"]) * max_qps < _record["count_qps"]:
                    # NOTE(review): this sleeps elapsed+resolution; arguably
                    # it should sleep only the remainder of the window --
                    # confirm the intended pacing behaviour.
                    time.sleep(now + qps_resolution - _record["checkpoint_qps"])
                    _record["count_qps"] = 0
                    _record["checkpoint_qps"] = now
                # start a fresh accounting window
                if now - _record["checkpoint_qps"] > qps_resolution:
                    _record["checkpoint_qps"] = now
                    _record["count_qps"] = 0
            return result
        return _func
    return dec
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,291 | bmaximuml/groups | refs/heads/master | /models.py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# Association table for the Group <-> Property many-to-many relationship;
# each link can carry an optional per-group value for the property.
properties = db.Table('properties',
    db.Column('group_id', db.Integer, db.ForeignKey('group.id'), primary_key=True),
    db.Column('property_id', db.Integer, db.ForeignKey('property.id'), primary_key=True),
    db.Column('value', db.String(50), nullable=True)
)
class Group(db.Model):
    """A named group with a description and a set of properties."""
    __tablename__ = 'group'

    id = db.Column(db.Integer, primary_key=True, nullable=False)
    name = db.Column(db.String(100), nullable=False)
    description = db.Column(db.Text, nullable=False)
    # BUGFIX: the closing quote was misplaced -- db.backref('groups, lazy=True')
    # created a backref literally named "groups, lazy=True" and never set
    # lazy; the intended backref is 'groups' with lazy=True.
    properties = db.relationship(
        'Property', secondary=properties, lazy='subquery',
        backref=db.backref('groups', lazy=True)
    )
class Property(db.Model):
    """A property that can be attached to groups via the association table."""
    __tablename__ = 'property'

    id = db.Column(db.Integer, primary_key=True, nullable=False)
    name = db.Column(db.String(50), nullable=True)
| {"/application.py": ["/exceptions.py", "/models.py"], "/setup.py": ["/exceptions.py"]} |
class EnvironmentUnsetError(Exception):
    """Raised when one or more required environment variables are missing."""

    def __init__(self, *variables):
        # remember every missing variable name for the message below
        self.variables = variables

    def __str__(self):
        return "Environment variable(s) not set: {!r}".format(self.variables)
| {"/application.py": ["/exceptions.py", "/models.py"], "/setup.py": ["/exceptions.py"]} |
54,293 | bmaximuml/groups | refs/heads/master | /application.py | from datetime import datetime
from email.message import EmailMessage
from exceptions import EnvironmentUnsetError
from flask import Flask, render_template
from os import environ
from smtplib import SMTP_SSL
from wtforms import Form, StringField, SubmitField, TextAreaField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Email, length
from models import db, Group
def create_application():
    """Build and configure the Flask app backed by a local SQLite database.

    Raises:
        EnvironmentUnsetError: when WFB_FLASK_SECRET_KEY is not set.
    """
    app = Flask(__name__)
    try:
        app.secret_key = environ['WFB_FLASK_SECRET_KEY']
    except KeyError:
        raise EnvironmentUnsetError('WFB_FLASK_SECRET_KEY')
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///groups.db'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app=app)
    # NOTE(review): create_all(app=app) is the legacy Flask-SQLAlchemy API;
    # newer releases require an app context instead -- confirm version pin.
    db.create_all(app=app)
    return app
# module-level app instance, created at import time
application = create_application()
class ContactForm(Form):
    """WTForms contact form: name, email, message, and a submit button.

    render_kw carries the Bulma CSS classes and HTML attributes used by
    the templates.
    """

    name = StringField(
        'Name',
        validators=[DataRequired(), length(max=200)],
        render_kw={
            "placeholder": "Name",
            "class": "input",
            "maxlength": 200
        }
    )
    email = EmailField(
        'Email Address',
        validators=[
            DataRequired(),
            Email(message="Invalid email address"),
            length(max=200)
        ],
        render_kw={
            "placeholder": "Email",
            "class": "input",
            "maxlength": 200
        }
    )
    message = TextAreaField(
        'Message',
        validators=[DataRequired(), length(max=5000)],
        render_kw={
            "placeholder": "Enter your message here...",
            "class": "textarea",
            "rows": 5,
            "maxlength": 5000
        }
    )
    submit = SubmitField(
        'Send',
        render_kw={
            "class": "button is-link"
        }
    )
@application.route('/', methods=['POST', 'GET'])
def home():
    """Render the index page listing all groups sorted by name."""
    all_groups = Group.query.order_by(Group.name).all()
    # NOTE(review): raises KeyError if WFB_PROJECT_NAME is unset --
    # setup.py checks it, but a direct run of this module does not.
    return render_template(
        'index.html',
        all_groups=all_groups,
        title=environ['WFB_PROJECT_NAME'],
        year=datetime.now().year,
    )
def send_message(name, email, message):
    """Send the contact-form *message* via SMTP over SSL.

    :param name: sender's display name, used in the subject line
    :param email: sender's address, used as the From header
    :param message: plain-text body
    :raises EnvironmentUnsetError: when required SMTP settings are missing
    """
    required_env_vars = [
        'WFB_SMTP_HOST',
        'WFB_SMTP_PORT',
    ]
    for var in required_env_vars:
        if var not in environ:
            raise EnvironmentUnsetError(var)
    msg = EmailMessage()
    msg.set_content(message)
    msg['Subject'] = name + ' - {} Contact Form'.format(environ['WFB_PROJECT_NAME'])
    msg['From'] = email
    if 'WFB_SMTP_TARGET' in environ:
        msg['To'] = environ['WFB_SMTP_TARGET']
    elif 'WFB_SITE_URL' in environ:
        msg['To'] = 'contactform@{}'.format(environ['WFB_SITE_URL'])
    else:
        raise EnvironmentUnsetError('WFB_SMTP_TARGET', 'WFB_SITE_URL')
    # BUGFIX: the third SMTP_SSL argument is local_hostname, and
    # WFB_SITE_URL may be unset on this code path (only the fallback
    # 'To' branch requires it), which used to raise KeyError -- fall
    # back to None so smtplib derives the local FQDN.  The port is
    # converted to int for robustness.
    sender = SMTP_SSL(
        environ['WFB_SMTP_HOST'],
        int(environ['WFB_SMTP_PORT']),
        environ.get('WFB_SITE_URL')
    )
    if 'WFB_SMTP_USERNAME' in environ and 'WFB_SMTP_PASSWORD' in environ:
        sender.login(
            environ['WFB_SMTP_USERNAME'],
            environ['WFB_SMTP_PASSWORD']
        )
    sender.send_message(msg)
    sender.quit()
if __name__ == '__main__':
    # run the built-in development server when executed directly
    application.debug = True
    application.run()
| {"/application.py": ["/exceptions.py", "/models.py"], "/setup.py": ["/exceptions.py"]} |
54,294 | bmaximuml/groups | refs/heads/master | /setup.py | from os import environ
from setuptools import setup, find_packages
from exceptions import EnvironmentUnsetError
required_env_vars = [
    'WFB_PROJECT_NAME',
    'WFB_FLASK_SECRET_KEY'
]
# fail fast when mandatory configuration is absent
for var in required_env_vars:
    if var not in environ:
        raise EnvironmentUnsetError(var)

# optional setup() kwargs, filled only from variables that are present
optional_setup = {}
if 'WFB_AUTHOR_NAME' in environ:
    optional_setup['author'] = environ['WFB_AUTHOR_NAME']
if 'WFB_AUTHOR_EMAIL' in environ:
    optional_setup['author_email'] = environ['WFB_AUTHOR_EMAIL']
# BUGFIX: this previously tested WFB_AUTHOR_NAME, so setting the author
# without WFB_PROJECT_URL raised KeyError, and a set project URL without
# an author name was silently dropped.
if 'WFB_PROJECT_URL' in environ:
    optional_setup['url'] = environ['WFB_PROJECT_URL']
# Register the package with setuptools; the name comes from the environment
# (validated above) and the optional author/url fields from optional_setup.
setup(
    name=environ['WFB_PROJECT_NAME'],
    version='0.1',
    long_description=__doc__,
    packages=find_packages(),
    include_package_data=True,
    zip_safe=True,
    install_requires=[
        'datetime',
        'Flask',
        'Flask-SQLAlchemy',
        'mysql-connector-python',
        'WTForms'
    ],
    **optional_setup
)
| {"/application.py": ["/exceptions.py", "/models.py"], "/setup.py": ["/exceptions.py"]} |
54,296 | jagheterfredrik/sheet | refs/heads/master | /sheet/shargh.py | import sheet.server
import argh
from argparse import ArgumentParser
from argh.assembling import add_commands
class ShargumentParserExit(Exception):
    """Raised by ShargumentParser instead of letting argparse call sys.exit().

    Carries the parser's exit status and optional message so the SSH
    dispatcher can report them back to the client.
    """

    def __init__(self, status, message):
        # BUGFIX: this used to call Exception.__init__(self) with the
        # instance itself as the argument, which made str() on the
        # exception recurse infinitely.
        super(ShargumentParserExit, self).__init__(message)
        self.status = status
        self.message = message
class ShargumentParser(ArgumentParser):
    """ArgumentParser variant that never exits the process and writes all
    of its output to a caller-supplied file-like object (the SSH channel).

    Extra keyword arguments:
        clear_prog: truthy value forces an empty program name in usage text
        output: file-like object receiving error and help text
    """

    def __init__(self, *args, **kwargs):
        # BUGFIX: 'clear_prog' and 'output' are our own extensions and must
        # be removed before delegating -- ArgumentParser.__init__ raises
        # TypeError on unexpected keywords (previously 'clear_prog' was
        # left in kwargs).
        if kwargs.pop('clear_prog', False) is not False:
            kwargs['prog'] = ''
        if 'output' in kwargs:
            self.output = kwargs.pop('output')
        super(ShargumentParser, self).__init__(*args, **kwargs)

    def exit(self, status=0, message=None):
        # raise instead of killing the whole server process
        raise ShargumentParserExit(status, message)

    def error(self, message):
        # NOTE(review): deliberately does not exit, so parsing continues
        # after reporting the error to the remote client.
        self.output.write(message + '\n')

    def print_help(self):
        self.output.write(self.format_help())
def serve_commands(commands, config=None):
    """Serve *commands* (argh-style functions) over the sheet SSH server.

    Each SSH exec request is parsed with a ShargumentParser and dispatched
    via argh; the command's output goes back over the channel, and the
    return value becomes the remote exit status.

    :param commands: list of callables to expose as subcommands
    :param config: optional path to the server's YAML config file
    """
    def cb(args, infile, outfile, errfile):
        # a fresh parser per request keeps per-channel output isolated
        parser = ShargumentParser(output=outfile, formatter_class=argh.constants.PARSER_FORMATTER)
        argh.assembling.add_commands(parser, commands, func_kwargs={'output': outfile})
        status = 0
        try:
            if args:
                args = args.split(' ')
            else:
                args = []
            argh.dispatch(parser, argv=args, output_file=outfile, errors_file=errfile)
        except ShargumentParserExit, ex:  # the parser requested an exit
            status = ex.status
        except Exception, e:
            # last-resort catch: report a failure status to the SSH client
            print e
            status = 1
        return status
    sheet.server.Server(cb, config=config).start()
| {"/sheet/auth/base.py": ["/sheet/auth/broker.py"], "/sheet/auth/ldappubkey.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"], "/sheet/auth/pubkeydirectory.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"]} |
54,297 | jagheterfredrik/sheet | refs/heads/master | /sheet/auth/base.py | import paramiko
from sheet.auth.broker import Broker
@Broker.register
class BaseAuthHandler(paramiko.ServerInterface):
    "Provides base class for authentication, never use directly."
    __id__ = 'BaseAuth'

    def __init__(self):
        # the exec command requested by the client, read later by the server
        self.command = None

    def check_channel_request(self, kind, channel):
        # only plain sessions are allowed (no forwarding etc.)
        if kind == 'session':
            return paramiko.OPEN_SUCCEEDED
        return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED

    def check_channel_shell_request(self, channel):
        return True

    def check_channel_exec_request(self, channel, command):
        # remember the requested command so the dispatcher can run it
        self.command = command
        return True

    def check_auth_publickey(self, username, key):
        # template method: the dead `if False` branch marks where
        # subclasses return AUTH_SUCCESSFUL; the base class rejects all keys
        if False:
            return paramiko.AUTH_SUCCESSFUL
        return paramiko.AUTH_FAILED

    def get_allowed_auths(self, username):
        return 'publickey'
class AuthBroker(object):
    """Registry mapping auth-provider ids (``__id__``) to handler classes."""

    def __init__(self):
        # provider id -> handler class
        self.providers = {}

    def register(self, classdef):
        """Class decorator: register *classdef* under its ``__id__``."""
        name = classdef.__id__
        if name in self.providers:
            print 'Provider', name, 'defined more than once'
        self.providers[name] = classdef
        return classdef

    def get(self, name):
        """Look up a registered provider class by id.

        Prints a warning when missing; the lookup then raises KeyError.
        """
        if not name in self.providers:
            print 'The configured provider does not exist'
        return self.providers[name]
# module-level singleton used by all auth modules via @Broker.register
Broker = AuthBroker()
| {"/sheet/auth/base.py": ["/sheet/auth/broker.py"], "/sheet/auth/ldappubkey.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"], "/sheet/auth/pubkeydirectory.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"]} |
def constant_time_compare(val1, val2):
    """Return True iff the two strings are equal.

    The comparison time is independent of how many characters match;
    only the length difference short-circuits (leaking length, not
    content).  Taken from Django source code.
    """
    if len(val1) != len(val2):
        return False
    # XOR every character pair; any mismatch leaves a non-zero bit
    mismatch = sum(ord(a) ^ ord(b) for a, b in zip(val1, val2))
    return mismatch == 0
| {"/sheet/auth/base.py": ["/sheet/auth/broker.py"], "/sheet/auth/ldappubkey.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"], "/sheet/auth/pubkeydirectory.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"]} |
54,300 | jagheterfredrik/sheet | refs/heads/master | /sheet/server.py | import socket
import sys
import traceback
import paramiko
import threading
import yaml
from paramiko.rsakey import RSAKey
from sheet.auth.broker import Broker
from sheet.auth import *
class Server():
def __init__(self, cb, config=None, address='', port=58337, backlog=100):
self.cb = cb
# Parse config <3
if config is not None:
with open(config, 'r') as f:
cfg = yaml.load(f)
else:
cfg = {}
logfile = cfg.get('logfile', None)
if logfile is not None:
paramiko.util.log_to_file(logile)
host_key_path = cfg.get('host_key', 'server.key')
host_key_password = cfg.get('host_key_password', None)
try:
self.host_key = RSAKey.from_private_key_file(host_key_path, host_key_password)
except paramiko.ssh_exception.PasswordRequiredException:
print 'Invalid host_key_password'
sys.exit(1)
except IOError:
print '*****************************************'
print '** host_key does not exists! **'
print '** In the name of security by default, **'
print '** Sheet will generate one for you. **'
print '*****************************************'
RSAKey.generate(2048).write_private_key_file(host_key_path, host_key_password)
self.handler = Broker.get(cfg.get('auth_handler', 'BaseAuth'))
self.handler_conf = cfg.get('auth_handler_config', {})
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((address, port))
except Exception as e:
print 'Bind failed: ', str(e)
traceback.print_exc()
sys.exit(1)
try:
self.socket.listen(backlog)
except Exception as e:
print 'Listen/accept failed:', str(e)
traceback.print_exc()
sys.exit(1)
def start(self):
while True:
try:
client, addr = self.socket.accept()
ServerThread(client, addr, self.cb, self.host_key, self.handler, self.handler_conf).start()
except KeyboardInterrupt:
self.socket.close()
break
class ServerThread(threading.Thread):
    """Handles one accepted connection: SSH handshake, auth, exec dispatch."""

    def __init__(self, client, addr, cb, host_key, handler, handler_conf):
        super(ServerThread, self).__init__(name='SheetServerThread')
        self.client = client  # the accepted socket
        self.addr = addr  # peer address
        self.cb = cb  # callback(command, infile, outfile, errfile) -> status
        self.host_key = host_key
        self.handler = handler  # auth handler class from the Broker
        self.handler_conf = handler_conf  # kwargs for the handler class

    def run(self):
        t = paramiko.Transport(self.client, gss_kex=False)
        t.add_server_key(self.host_key)
        #handler = LdapPubkeyAuthHandler()
        handler = self.handler(**self.handler_conf)
        try:
            t.start_server(server=handler)
        except paramiko.SSHException:
            return
        except EOFError:
            return
        # wait up to 10 seconds for the client to open a session channel
        chan = t.accept(10)
        if chan is None:
            return
        infile = chan.makefile('r')
        outfile = chan.makefile('w')
        errfile = chan.makefile_stderr('w')
        # dispatch the exec command captured by the auth handler
        status = self.cb(handler.command, infile, outfile, errfile)
        chan.send_exit_status(status)
        chan.shutdown(2)
        chan.close()
        t.close()
| {"/sheet/auth/base.py": ["/sheet/auth/broker.py"], "/sheet/auth/ldappubkey.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"], "/sheet/auth/pubkeydirectory.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"]} |
54,301 | jagheterfredrik/sheet | refs/heads/master | /sheet/auth/ldappubkey.py | import paramiko
import ldap
from sheet.auth import base
from sheet.auth.broker import Broker
from sheet import util
@Broker.register
class LdapPubkeyAuthHandler(base.BaseAuthHandler):
    """Auth based on public keys stored in LDAP.

    Anonymously binds to LDAP in order to look up the public key for
    the connecting username and matches against the supplied key.
    """
    __id__ = 'LdapPubkeyAuth'

    def __init__(self, host, port, base_dn, username_field, pubkey_field):
        super(LdapPubkeyAuthHandler, self).__init__()
        self.host = host
        self.port = port  # NOTE(review): stored but never used -- confirm intent
        self.base_dn = base_dn
        self.username_field = username_field
        self.pubkey_field = pubkey_field
        self.ldap = ldap.initialize(self.host)
        # anonymous bind -- the directory must allow anonymous key lookups
        self.ldap.simple_bind()

    def check_auth_publickey(self, username, key):
        # NOTE(review): [0] raises IndexError when no entry matches the
        # username -- confirm callers tolerate that.
        uid, res = self.ldap.search_s(self.base_dn, ldap.SCOPE_SUBTREE, self.username_field+'='+username)[0]
        if self.pubkey_field in res:
            for entry in res[self.pubkey_field]:
                # stored form presumably "ssh-rsa <base64> [comment]"
                ldapkey = entry.split(' ')
                if util.constant_time_compare(key.get_base64(), ldapkey[1]):
                    return paramiko.AUTH_SUCCESSFUL
        return paramiko.AUTH_FAILED
| {"/sheet/auth/base.py": ["/sheet/auth/broker.py"], "/sheet/auth/ldappubkey.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"], "/sheet/auth/pubkeydirectory.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"]} |
54,302 | jagheterfredrik/sheet | refs/heads/master | /test.py | from sheet import shargh
import time
def main(name='unknown user'):
    """Return a greeting for *name* (demo command served over SSH)."""
    greeting = 'Hello {}!'.format(name)
    return greeting
def work():
    """Demo long-running command: yields a progress line, then completion."""
    yield 'Working...'
    time.sleep(2)  # simulate slow work
    yield 'DONE!'
# expose the demo commands over SSH, using the config file next to this script
shargh.serve_commands([main, work], config='test_conf.yaml')
| {"/sheet/auth/base.py": ["/sheet/auth/broker.py"], "/sheet/auth/ldappubkey.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"], "/sheet/auth/pubkeydirectory.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"]} |
54,303 | jagheterfredrik/sheet | refs/heads/master | /sheet/auth/pubkeydirectory.py | import paramiko
from os.path import isfile, normpath, join
from sheet.auth import base
from sheet.auth.broker import Broker
from sheet import util
@Broker.register
class PubkeyDirectoryAuthHandler(base.BaseAuthHandler):
    """Allows public keys in a specified filesystem directory where
    the filename matches the username.
    """
    __id__ = 'PubkeyDirectoryAuth'

    def __init__(self, path):
        super(PubkeyDirectoryAuthHandler, self).__init__()
        self.path = path  # directory containing one public-key file per user

    def check_auth_publickey(self, username, key):
        # strip path traversal ("../") from the client-supplied username
        sanitized_username = normpath('/' + username).lstrip('/')
        keypath = join(self.path, sanitized_username)
        if isfile(keypath):
            with open(keypath, 'r') as f:
                # stored form presumably "ssh-rsa <base64> [comment]"
                filekey = f.read().split(' ')
                if util.constant_time_compare(key.get_base64(), filekey[1]):
                    return paramiko.AUTH_SUCCESSFUL
        return paramiko.AUTH_FAILED
| {"/sheet/auth/base.py": ["/sheet/auth/broker.py"], "/sheet/auth/ldappubkey.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"], "/sheet/auth/pubkeydirectory.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"]} |
54,304 | jagheterfredrik/sheet | refs/heads/master | /sheet/auth/__init__.py | __all__ = ['base', 'broker', 'ldappubkey', 'spotifypubkeydirectory', 'pubkeydirectory']
| {"/sheet/auth/base.py": ["/sheet/auth/broker.py"], "/sheet/auth/ldappubkey.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"], "/sheet/auth/pubkeydirectory.py": ["/sheet/auth/__init__.py", "/sheet/auth/broker.py"]} |
54,308 | PoWeiChiao/OCT | refs/heads/main | /model/ResUNet.py | import torch
import torch.nn as nn
from torch.nn import functional as F
class DoubleConv(nn.Module):
    """Two stacked (3x3 conv -> BatchNorm -> ReLU) stages.

    Spatial size is preserved (padding=1); channels go in -> out -> out.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        stages = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.double_conv(x)
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs plus an identity or
    1x1-projection shortcut."""
    # output-channel multiplier relative to `out_channels`
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # NOTE(review): "residual_fuction" is misspelled, but renaming the
        # attribute would change state_dict keys and break saved checkpoints.
        self.residual_fuction = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels * BasicBlock.expansion, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels * BasicBlock.expansion)
        )
        # projection shortcut when the shape changes (stride or channels)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels * BasicBlock.expansion:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels * BasicBlock.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels * BasicBlock.expansion)
            )

    def forward(self, x):
        # elementwise sum of the residual and shortcut paths, then ReLU
        return nn.ReLU(inplace=True)(self.residual_fuction(x) + self.shortcut(x))
class BottleNeck(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce, 3x3, 1x1 expand."""
    # output-channel multiplier relative to `out_channels`
    expansion = 4

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.residual_function = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels * BottleNeck.expansion, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels * BottleNeck.expansion)
        )
        # projection shortcut when the shape changes (stride or channels)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels * BottleNeck.expansion:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels * BottleNeck.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels * BottleNeck.expansion)
            )

    def forward(self, x):
        # elementwise sum of the residual and shortcut paths, then ReLU
        return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
class ResUNet(nn.Module):
    """U-Net with a ResNet-style encoder.

    The encoder follows the standard ResNet stem plus four residual
    stages; the decoder upsamples and concatenates the matching encoder
    feature maps.

    Args:
        in_channel: number of input image channels
        out_channel: number of output (segmentation) channels
        block: residual block class (BasicBlock or BottleNeck)
        num_block: sequence of 4 ints, blocks per encoder stage

    NOTE(review): the decoder channel counts (512+256, ...) assume
    block.expansion == 1, i.e. BasicBlock; BottleNeck (expansion 4)
    would not match -- confirm intended usage.
    """

    def __init__(self, in_channel, out_channel, block, num_block):
        super().__init__()
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.block = block
        self.num_block = num_block
        # running channel count consumed/updated by _make_layer
        self.in_channels = 64
        # ResNet stem: 7x7 stride-2 conv (halves H and W)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channel, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.conv2_x = self._make_layer(block, 64, num_block[0], 1)
        self.conv3_x = self._make_layer(block, 128, num_block[1], 2)
        self.conv4_x = self._make_layer(block, 256, num_block[2], 2)
        self.conv5_x = self._make_layer(block, 512, num_block[3], 2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # decoder: each stage consumes upsampled features + the encoder skip
        self.dconv_up3 = DoubleConv(512 + 256, 256)
        self.dconv_up2 = DoubleConv(256 + 128, 128)
        self.dconv_up1 = DoubleConv(128 + 64, 64)
        # final stage: fuse with the stem output and project to classes
        self.dconv_last = nn.Sequential(
            nn.Conv2d(128, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(64, out_channel, 1)
        )

    def _make_layer(self, block, out_channels, num_blocks, stride):
        # only the first block may downsample; the rest keep resolution
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for s in strides:
            layers.append(block(self.in_channels, out_channels, s))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        # ----- encoder -----
        conv1 = self.conv1(x)
        temp = self.maxpool(conv1)
        conv2 = self.conv2_x(temp)
        conv3 = self.conv3_x(conv2)
        conv4 = self.conv4_x(conv3)
        bottle = self.conv5_x(conv4)
        # ----- decoder with skip connections -----
        x = self.upsample(bottle)
        x = torch.cat([x, conv4], dim=1)
        x = self.dconv_up3(x)
        x = self.upsample(x)
        x = torch.cat([x, conv3], dim=1)
        x = self.dconv_up2(x)
        x = self.upsample(x)
        x = torch.cat([x, conv2], dim=1)
        x = self.dconv_up1(x)
        x = self.upsample(x)
        x = torch.cat([x, conv1], dim=1)
        out = self.dconv_last(x)
        return out
54,309 | PoWeiChiao/OCT | refs/heads/main | /model/UNet.py | import torch
import torch.nn as nn
from torch.nn import functional as F
class DoubleConv(nn.Module):
    """(3x3 conv -> BatchNorm -> ReLU) applied twice; spatial size preserved."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        stages = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        # Attribute name kept as 'double_conv' so checkpoint keys are unchanged.
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Run both conv stages on ``x``."""
        return self.double_conv(x)
class Down(nn.Module):
    """Halve spatial resolution with 2x2 max-pooling, then apply a DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Attribute name kept as 'down' so checkpoint keys are unchanged.
        pipeline = [nn.MaxPool2d(2), DoubleConv(in_channels, out_channels)]
        self.down = nn.Sequential(*pipeline)

    def forward(self, x):
        """Downscale and re-convolve ``x``."""
        return self.down(x)
class Up(nn.Module):
    """Bilinear 2x upsample of the decoder path, pad to align with the skip
    connection, concatenate, then fuse with a DoubleConv.

    ``in_channels`` must equal the channel sum of the two fused tensors.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.up = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        """Upsample decoder tensor ``x1``, align it to skip tensor ``x2``, fuse."""
        x1 = self.up_sample(x1)
        # Height/width mismatch between decoder tensor and encoder skip.
        # Bug fix: use plain ints — F.pad expects a sequence of ints; the old
        # 1-element torch.tensor values relied on implicit conversion and
        # break on newer torch releases.
        diff_h = x2.size()[2] - x1.size()[2]
        diff_w = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [diff_w // 2, diff_w - diff_w // 2,
                        diff_h // 2, diff_h - diff_h // 2])
        x = torch.cat([x1, x2], dim=1)
        return self.up(x)
class OutConv(nn.Module):
    """Final 1x1 convolution mapping feature channels to class logits."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # 1x1 kernel: purely a per-pixel channel projection.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        """Project channels; spatial dimensions are untouched."""
        return self.conv(x)
class UNet(nn.Module):
    """Classic U-Net: 4 down stages, 4 up stages with skip connections.

    The deepest Down keeps 512 channels (instead of doubling) so the first
    Up's concatenated input is 512 + 512.
    """
    def __init__(self, n_channels, n_classes):
        """n_channels: input image channels; n_classes: output map channels."""
        super().__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.conv = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 512)
        # Up(in, out): 'in' is decoder channels + skip channels after concat.
        self.up4 = Up(512 + 512, 256)
        self.up3 = Up(256 + 256, 128)
        self.up2 = Up(128 + 128, 64)
        self.up1 = Up(64 + 64, 64)
        self.out = OutConv(64, n_classes)
    def forward(self, x):
        """Return raw (un-activated) logits of shape (N, n_classes, H, W)."""
        conv1 = self.conv(x)
        conv2 = self.down1(conv1)
        conv3 = self.down2(conv2)
        conv4 = self.down3(conv3)
        conv5 = self.down4(conv4)
        # Each Up fuses the decoder tensor with the matching encoder skip.
        x = self.up4(conv5, conv4)
        x = self.up3(x, conv3)
        x = self.up2(x, conv2)
        x = self.up1(x, conv1)
        logists = self.out(x)
        return logists
class conv_block_nested(nn.Module):
    """Two 3x3 conv -> BatchNorm -> ReLU stages, as used by NestedUNet (UNet++)."""

    def __init__(self, in_channels, mid_channels, out_channels):
        super().__init__()
        self.in_channels = in_channels
        self.mid_channels = mid_channels
        self.out_channels = out_channels
        stages = [
            nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        # Attribute name kept as 'conv_block' so checkpoint keys are unchanged.
        self.conv_block = nn.Sequential(*stages)

    def forward(self, x):
        """Apply both conv stages to ``x``."""
        return self.conv_block(x)
class NestedUNet(nn.Module):
    """UNet++ (nested U-Net): dense skip pathways between encoder and decoder.

    Node conv{i}_{j} consumes all previous nodes at depth i plus the
    upsampled node from depth i+1, so its input width is
    filters[i] * j + filters[i+1].
    """
    def __init__(self, n_channels, n_classes):
        """n_channels: input image channels; n_classes: output map channels."""
        super(NestedUNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        # Channel widths per depth; the deepest level stays at 512.
        filters = [64, 128, 256, 512, 512]
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # Backbone column (j = 0).
        self.conv0_0 = conv_block_nested(n_channels, filters[0], filters[0])
        self.conv1_0 = conv_block_nested(filters[0], filters[1], filters[1])
        self.conv2_0 = conv_block_nested(filters[1], filters[2], filters[2])
        self.conv3_0 = conv_block_nested(filters[2], filters[3], filters[3])
        self.conv4_0 = conv_block_nested(filters[3], filters[4], filters[4])
        # Nested decoder columns (j = 1..4).
        self.conv0_1 = conv_block_nested(filters[0] + filters[1], filters[0], filters[0])
        self.conv1_1 = conv_block_nested(filters[1] + filters[2], filters[1], filters[1])
        self.conv2_1 = conv_block_nested(filters[2] + filters[3], filters[2], filters[2])
        self.conv3_1 = conv_block_nested(filters[3] + filters[4], filters[3], filters[3])
        self.conv0_2 = conv_block_nested(filters[0] * 2 + filters[1], filters[0], filters[0])
        self.conv1_2 = conv_block_nested(filters[1] * 2 + filters[2], filters[1], filters[1])
        self.conv2_2 = conv_block_nested(filters[2] * 2 + filters[3], filters[2], filters[2])
        self.conv0_3 = conv_block_nested(filters[0] * 3 + filters[1], filters[0], filters[0])
        self.conv1_3 = conv_block_nested(filters[1] * 3 + filters[2], filters[1], filters[1])
        self.conv0_4 = conv_block_nested(filters[0] * 4 + filters[1], filters[0], filters[0])
        self.out = nn.Conv2d(in_channels=filters[0], out_channels=n_classes, kernel_size=1)
    def forward(self, x):
        """Return raw logits (N, n_classes, H, W) from the deepest nested node x0_4."""
        x0_0 = self.conv0_0(x)
        x1_0 = self.conv1_0(self.pool(x0_0))
        x0_1 = self.conv0_1(torch.cat([x0_0, self.up(x1_0)], dim=1))
        x2_0 = self.conv2_0(self.pool(x1_0))
        x1_1 = self.conv1_1(torch.cat([x1_0, self.up(x2_0)], dim=1))
        x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.up(x1_1)], dim=1))
        x3_0 = self.conv3_0(self.pool(x2_0))
        x2_1 = self.conv2_1(torch.cat([x2_0, self.up(x3_0)], dim=1))
        x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.up(x2_1)], dim=1))
        x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.up(x1_2)], dim=1))
        x4_0 = self.conv4_0(self.pool(x3_0))
        x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], dim=1))
        x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.up(x3_1)], dim=1))
        x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.up(x2_2)], dim=1))
        x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.up(x1_3)], dim=1))
        out = self.out(x0_4)
        return out
def main():
    """Smoke test: push one random 512x512 RGB batch through NestedUNet."""
    model = NestedUNet(n_channels=3, n_classes=1)
    batch = torch.randn(1, 3, 512, 512)
    prediction = model(batch)
    print(model)
    print(prediction.size())
if __name__ == '__main__':
main() | {"/eval.py": ["/model/UNet.py", "/utils/dataset.py"], "/train.py": ["/model/UNet.py", "/utils/dataset.py"]} |
54,310 | PoWeiChiao/OCT | refs/heads/main | /utils/generator.py | import glob
import os
import shutil
def main():
    """Reorganise the raw Drishti-GS1 download into data/{train,test}/{image,mask}.

    Only the optic-disc ('ODseg') soft maps are copied as masks, renamed to
    match their source image so image/mask pairs sort identically later.
    """
    root = 'D:/pytorch/Segmentation/Drishti'
    raw_data_dir = 'Drishti-GS1_files'
    target = 'data'
    for split in ['Test', 'Training']:
        out_split = 'test' if split == 'Test' else 'train'
        image_paths = glob.glob(os.path.join(root, raw_data_dir, split, 'Images', '*.png'))
        for image_path in image_paths:
            fname = os.path.basename(image_path)
            shutil.copyfile(image_path, os.path.join(root, target, out_split, 'image', fname))
            # Ground truth lives under GT/<image stem>/SoftMap/.
            mask_pattern = os.path.join(root, raw_data_dir, split, 'GT', fname[:-4], 'SoftMap', '*.png')
            for mask_path in glob.glob(mask_pattern):
                if 'ODseg' in mask_path:
                    shutil.copyfile(mask_path, os.path.join(root, target, out_split, 'mask', fname))
if __name__ == '__main__':
main() | {"/eval.py": ["/model/UNet.py", "/utils/dataset.py"], "/train.py": ["/model/UNet.py", "/utils/dataset.py"]} |
54,311 | PoWeiChiao/OCT | refs/heads/main | /eval.py | import cv2 as cv
import glob
import numpy as np
import os
from PIL import Image
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from model.UNet import UNet, NestedUNet
from utils.dataset import DrishtiDataset
def save(save_dir, image):
    """Write ``image`` (ndarray, BGR or grayscale) to path ``save_dir`` via OpenCV."""
    cv.imwrite(save_dir, image)
def create_label_on_image(image_dir, label_dir):
    """Overlay the predicted mask in green onto the input image.

    The blended result is saved to predict_mask/<image basename>.
    Assumes the mask at ``label_dir`` is single-channel and 512x512
    (it is produced by predict() at that size) — TODO confirm.
    """
    image = Image.open(image_dir)
    image = image.resize((512, 512))
    label = Image.open(label_dir)
    image = np.array(image)
    # PIL gives RGB; reverse the channel axis to get OpenCV's BGR order.
    image = image[:, :, ::-1]
    # Build a 3-channel mask with the prediction in the green channel only.
    label_blue = np.zeros((512, 512))
    label_green = np.array(label)
    label_red = np.zeros((512, 512))
    mask = np.stack([label_blue, label_green, label_red], axis=-1)
    mask = np.array(mask, dtype=np.uint8)
    # Alpha-blend: full-weight image + 0.3-weight green mask.
    image = cv.addWeighted(image, 1, mask, 0.3, 0)
    save(os.path.join('predict_mask', os.path.basename(image_dir)), image)
def predict(net, device, dataset, test_dir):
    """Run inference over ``dataset`` and return binarised uint8 masks.

    Args:
        net: trained segmentation network.
        device: torch device to run on.
        dataset: paired image/mask dataset (masks are ignored here).
        test_dir: unused — kept for interface compatibility with callers.

    Returns:
        list of 2-D uint8 arrays with values 0/255, one per dataset item.
    """
    test_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False)
    output = []
    net.eval()
    with torch.no_grad():
        for i, (image, mask) in enumerate(test_loader):
            image = image.to(device=device, dtype=torch.float32)
            pred = net(image)
            # Drop batch and channel dims: (1, 1, H, W) -> (H, W).
            pred = np.array(pred.data.cpu()[0])[0]
            # NOTE(review): threshold applied to raw network output; confirm
            # the net ends in a sigmoid or that >0.5 on logits is intended.
            pred = np.where(pred > 0.5, 255, 0)
            pred = np.array(pred, dtype=np.uint8)
            output.append(pred)
    return output
def main():
    """Load the trained model, predict masks for data/test, save overlays.

    Predictions go to predict/, green-overlay visualisations to predict_mask/.
    Both output directories are assumed to exist — TODO confirm.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = NestedUNet(n_channels=3, n_classes=1)
    # net = UNet(n_channels=3, n_classes=1)
    net.load_state_dict(torch.load('model.pth', map_location=device))
    net.to(device=device)
    data_dir = 'data/test'
    image_transforms = transforms.Compose([
        transforms.Resize(size=(512, 512)),
        transforms.ToTensor()
    ])
    # is_random=False disables augmentation so predictions align with files.
    dataset_test = DrishtiDataset(data_dir=data_dir, image_transforms=image_transforms, is_random=False)
    test_dir = 'data/test'
    test_list = glob.glob(os.path.join(test_dir, 'image', '*.png'))
    # Sorted to match the dataset's internal (sorted) file order.
    test_list.sort()
    pred = predict(net, device, dataset_test, test_dir)
    for i, (test) in enumerate(test_list):
        save(os.path.join('predict', os.path.basename(test)), pred[i])
        create_label_on_image(os.path.join(test_dir, 'image', os.path.basename(test)), os.path.join('predict', os.path.basename(test)))
# Run evaluation only when executed as a script (allows clean import).
if __name__ == '__main__':
    main()
| {"/eval.py": ["/model/UNet.py", "/utils/dataset.py"], "/train.py": ["/model/UNet.py", "/utils/dataset.py"]} |
54,312 | PoWeiChiao/OCT | refs/heads/main | /utils/dataset.py | import glob
import os
import numpy as np
import random
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
class DrishtiDataset(Dataset):
    """Paired image/mask dataset for Drishti-GS optic-disc segmentation.

    Expects ``data_dir/image/*.png`` and ``data_dir/mask/*.png`` with
    matching basenames; pairing relies on identical sort order of the
    two listings — TODO confirm filenames correspond one-to-one.
    """
    def __init__(self, data_dir, image_transforms, is_random=True):
        """is_random enables random horizontal/vertical flip augmentation."""
        self.data_dir = data_dir
        self.image_transforms = image_transforms
        self.is_random = is_random
        self.images_list = glob.glob(os.path.join(data_dir, 'image', '*.png'))
        self.masks_list = glob.glob(os.path.join(data_dir, 'mask', '*.png'))
        self.images_list.sort()
        self.masks_list.sort()
    def __len__(self):
        """Number of image/mask pairs."""
        return len(self.images_list)
    def __getitem__(self, idx):
        """Return (image tensor, binary mask tensor of shape (1, 512, 512))."""
        image = Image.open(self.images_list[idx])
        mask = Image.open(self.masks_list[idx])
        if self.is_random:
            # Apply the SAME flip to image and mask to keep them aligned.
            isFlipLR = random.random()
            isFlipTB = random.random()
            if isFlipLR > 0.5:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
                mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
            if isFlipTB > 0.5:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
                mask = mask.transpose(Image.FLIP_TOP_BOTTOM)
        # Resample mode 0 = NEAREST, preserving hard label boundaries.
        mask = mask.resize((512, 512), 0)
        mask = np.array(mask)
        # Binarise the soft map: any positive value counts as foreground.
        mask = np.where(mask > 0, 1, 0)
        mask = torch.from_numpy(mask)
        mask = mask.unsqueeze(0)
        image = self.image_transforms(image)
        return image, mask
def main():
    """Quick manual check: print dataset size and first sample's shapes."""
    data_dir = 'D:/pytorch/Segmentation/Drishti/data/train'
    image_transforms = transforms.Compose([
        transforms.Resize(size=(512, 512)),
        transforms.ToTensor()
    ])
    dataset = DrishtiDataset(data_dir=data_dir, image_transforms=image_transforms)
    print(dataset.__len__())
    image, mask = dataset.__getitem__(0)
    print(image.shape)
    print(mask.shape)
# Manual smoke test when run directly; no side effects on import.
if __name__ == '__main__':
    main()
| {"/eval.py": ["/model/UNet.py", "/utils/dataset.py"], "/train.py": ["/model/UNet.py", "/utils/dataset.py"]} |
54,313 | PoWeiChiao/OCT | refs/heads/main | /train.py | import os
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from model.UNet import UNet, NestedUNet
from utils.dataset import DrishtiDataset
from utils.DiceLoss import DiceLoss
from utils.logger import Logger
def train(net, device, dataset, batch_size=2, epochs=50, lr=1e-4):
    """Train ``net`` on ``dataset`` with Dice loss and RMSprop.

    Saves the weights to model.pth whenever the epoch's mean TRAINING loss
    matches or improves the best seen (no validation split is used).
    Appends per-epoch loss to log_train.txt via the project Logger.
    """
    train_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
    optimizer = optim.RMSprop(params=net.parameters(), lr=lr, weight_decay=1e-8, momentum=0.9)
    criterion = DiceLoss()
    log_train = Logger('log_train.txt')
    best_loss = float('inf')
    for epoch in range(epochs):
        train_loss = 0.0
        print('running epoch: {}'.format(epoch))
        net.train()
        for image, mask in tqdm(train_loader):
            image = image.to(device=device, dtype=torch.float32)
            mask = mask.to(device=device, dtype=torch.float32)
            pred = net(image)
            loss = criterion(pred, mask)
            # Weight by batch size so the epoch mean is per-sample.
            train_loss += loss.item() * image.size(0)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        train_loss = train_loss / len(train_loader.dataset)
        print('\tTraining Loss: {:.6f}'.format(train_loss))
        log_train.write_line(str(epoch) + ' ' + str(round(train_loss, 6)))
        # '<=' (not '<') also re-saves on ties with the best loss so far.
        if train_loss <= best_loss:
            best_loss = train_loss
            torch.save(net.state_dict(), 'model.pth')
            print('model saved')
def main():
    """Build NestedUNet (resuming from model.pth if present) and train it."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('device: {}'.format(device))
    net = NestedUNet(n_channels=3, n_classes=1)
    # net = UNet(n_channels=3, n_classes=1)
    # Resume from a previous checkpoint when one exists.
    if os.path.isfile('model.pth'):
        net.load_state_dict(torch.load('model.pth', map_location=device))
    net.to(device=device)
    data_dir = 'data/train'
    image_transforms = transforms.Compose([
        transforms.Resize(size=(512, 512)),
        transforms.ToTensor()
    ])
    dataset_train = DrishtiDataset(data_dir=data_dir, image_transforms=image_transforms)
    train(net=net, device=device, dataset=dataset_train)
if __name__ == '__main__':
main() | {"/eval.py": ["/model/UNet.py", "/utils/dataset.py"], "/train.py": ["/model/UNet.py", "/utils/dataset.py"]} |
54,333 | kinow/hummingbird | refs/heads/master | /hummingbird/processes/wps_hdh_qachecker.py | import tarfile
from pywps import Process
from pywps import LiteralInput
from pywps import ComplexInput, ComplexOutput
from pywps import Format
from pywps.app.Common import Metadata
import logging
LOGGER = logging.getLogger("PYWPS")
class QualityChecker(Process):
    """PyWPS process wrapping the DKRZ QA checker (qa-dkrz) for CORDEX/CMIP5."""
    def __init__(self):
        # NOTE(review): "Qualtiy" typos below are user-visible runtime strings;
        # left untouched here since clients may match on them.
        inputs = [
            ComplexInput('dataset', 'NetCDF File',
                         abstract='You may provide a URL or upload a NetCDF file.',
                         min_occurs=1,
                         max_occurs=1024,
                         supported_formats=[Format('application/x-netcdf')]),
            LiteralInput('project', 'Project',
                         data_type='string',
                         abstract="Climate model data project to be checked: CORDEX or CMIP5",
                         min_occurs=1,
                         max_occurs=1,
                         default='CORDEX',
                         allowed_values=['CORDEX', 'CMIP5']),
        ]
        outputs = [
            ComplexOutput('output', 'Quality Checker Report',
                          abstract="Qualtiy checker results as tar archive.",
                          as_reference=True,
                          supported_formats=[Format('application/x-tar-gz')]),
            ComplexOutput('logfile', 'Quality Checker Logfile',
                          abstract="Qualtiy checker summary logfile",
                          as_reference=True,
                          supported_formats=[Format('text/yaml')]),
        ]
        super(QualityChecker, self).__init__(
            self._handler,
            identifier="qa_checker",
            title="Quality Assurance Checker by DKRZ",
            version="0.6.3",
            abstract="The Quality Assurance checker QA-DKRZ checks conformance of meta-data of climate simulations"
                     " given in NetCDF format with conventions and rules of climate model projects."
                     " At present, checking of CF Conventions, CMIP5, and CORDEX is supported."
                     " Development and maintenance for the QA checker is done by the"
                     " German Climate Computing Centre (DKRZ)."
                     " If you have suggestions for improvement then please contact"
                     " Heinz-Dieter Hollweg at DKRZ (hollweg@dkrz.de).",
            metadata=[
                Metadata('Birdhouse', 'http://bird-house.github.io/'),
                Metadata('User Guide', 'http://birdhouse-hummingbird.readthedocs.io/en/latest/'),
                Metadata('CF Conventions', 'http://cfconventions.org/'),
                Metadata('QA Checker Documentation', 'http://qa-dkrz.readthedocs.io/en/latest/'),
                Metadata('Conda Package', 'http://anaconda.org/birdhouse/qa-dkrz'),
                Metadata('GitHub', 'https://github.com/IS-ENES-Data/QA-DKRZ'),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )
    def _handler(self, request, response):
        """Run qa-dkrz over each input dataset; publish a tar of results + logfile.

        NOTE(review): logfile/results_path are overwritten each iteration, so
        only the LAST dataset's report is published — verify this is intended.
        """
        from hummingbird.processing import hdh_qa_checker
        response.update_status("starting qa checker ...", 0)
        datasets = [dataset.file for dataset in request.inputs['dataset']]
        logfile = results_path = None
        for idx, ds in enumerate(datasets):
            progress = idx * 100 / len(datasets)
            response.update_status("checking %s" % ds, progress)
            logfile, results_path = hdh_qa_checker(ds, project=request.inputs['project'][0].data)
        if logfile and results_path:
            # output tar archive
            with tarfile.open('output.tar.gz', "w:gz") as tar:
                response.outputs['output'].file = tar.name
                tar.add(results_path)
            response.outputs['logfile'].file = logfile
        response.update_status("qa checker done.", 100)
        return response
54,334 | kinow/hummingbird | refs/heads/master | /tests/test_wps_hdh.py | import pytest
from pywps import Service
from pywps.tests import assert_response_success
from .common import TESTDATA, client_for
from hummingbird.processes.wps_hdh_cfchecker import HDHCFChecker
@pytest.mark.skip(reason="no way of currently testing this")
@pytest.mark.online
def test_wps_qa_cfchecker():
    """Execute the qa_cfchecker WPS process against a remote NOAA test file.

    Skipped by default; requires network access and a working dkrz-cf-checker.
    """
    client = client_for(Service(processes=[HDHCFChecker()]))
    datainputs = "dataset@xlink:href={0};cf_version=auto".format(TESTDATA['noaa_nc_1'])
    resp = client.get(
        service='WPS', request='Execute', version='1.0.0',
        identifier='qa_cfchecker',
        datainputs=datainputs)
    assert_response_success(resp)
54,335 | kinow/hummingbird | refs/heads/master | /hummingbird/processing.py | import os
import glob
import subprocess
from subprocess import check_output, CalledProcessError
from .utils import fix_filename, make_dirs
import logging
LOGGER = logging.getLogger("PYWPS")
def ncgen(cdl_file, output_file=None):
    '''
    Generate a NetCDF-4 file from a CDL description using the ``ncgen`` tool.

    :param cdl_file: path to the input CDL file.
    :param output_file: target NetCDF path; defaults to ``output.nc``.

    Failures (missing binary, bad CDL) are logged and swallowed, preserving
    the original best-effort semantics; the function always returns ``None``.
    '''
    output_file = output_file or 'output.nc'
    try:
        subprocess.run(['ncgen', '-k', 'nc4', '-o', output_file, cdl_file], check=True)
    except Exception as err:
        # Bug fix: the previous message said "ncdump" (copy/paste from ncdump()).
        logging.getLogger("PYWPS").error("Could not run ncgen: {}".format(err))
def ncdump(dataset):
    '''
    Return the header metadata of *dataset* as a list of newline-terminated
    lines, or an error string when ``ncdump`` fails.

    Code adapted from https://github.com/ioos/compliance-checker-web
    '''
    try:
        raw = check_output(['ncdump', '-h', dataset])
        if not isinstance(raw, str):
            raw = raw.decode('utf-8')
        header_lines = raw.split('\n')
        # Replace the first line so only the dataset's basename is shown
        # instead of its full on-disk path.
        header_lines[0] = 'netcdf {} {{'.format(os.path.basename(dataset))
        return ['{}\n'.format(line) for line in header_lines]
    except Exception as err:
        LOGGER.error("Could not generate ncdump: {}".format(err))
        return "Error: generating ncdump failed"
def cmor_tables_path():
    """Return the absolute path of the bundled CMIP6 CMOR tables directory.

    Sets UVCDAT_ANONYMOUS_LOG=no before importing cmor to suppress the
    UVCDAT telemetry prompt. The path is derived by climbing five levels
    up from the cmor package into the environment's share/ directory —
    NOTE(review): this assumes a conda-style layout; verify per install.
    """
    os.environ['UVCDAT_ANONYMOUS_LOG'] = 'no'
    import cmor
    tables_path = os.path.abspath(
        os.path.join(cmor.__file__, '..', '..', '..', '..', '..', 'share', 'cmip6-cmor-tables', 'Tables'))
    return tables_path
def cmor_tables():
    """Return the sorted names (filename minus '.json') of all CMIP6 tables."""
    pattern = os.path.join(cmor_tables_path(), 'CMIP6_*.json')
    names = [os.path.basename(path)[0:-5] for path in glob.glob(pattern)]
    return sorted(names)
def cmor_dump_output(dataset, status, output, output_filename):
    """Convert raw PrePARE checker output into a cleaned Markdown-ish report.

    :param dataset: checked dataset path (only its basename is reported).
    :param status: True if the check passed, anything else counts as failed.
    :param output: raw checker output (str or utf-8 bytes).
    :param output_filename: file the formatted report is written to.

    Filtering: drops ANSI-escape lines, "In function:"/"called from:" noise
    and blank lines; strips '!' and non-printable chars; bullets error lines.
    """
    import string
    if not isinstance(output, str):
        output = output.decode('utf-8')
    # Show only the filename, never the server-side path.
    dataset_id = os.path.basename(dataset)  # 'uploaded-file'
    converted_lines = []
    converted_lines.append('## Checking NetCDF file {}\n\n'.format(dataset_id))
    # NOTE(review): "Dateset" typo kept — downstream consumers may match it.
    if status is True:
        converted_lines.append("Dateset *passed* CMIP6 cmor checks:\n")
    else:
        converted_lines.append("Dateset *failed* CMIP6 cmor checks:\n")
    for line in output.split('\n'):
        # Bug fix: str.translate(None, '!') is the Python 2 API and raises
        # TypeError on Python 3; replace() removes '!' the portable way.
        line = line.replace('!', '')
        if chr(27) in line:
            # Skip lines carrying ANSI colour escapes.
            continue
        if "In function:" in line:
            continue
        if "called from:" in line:
            continue
        line = line.strip()
        if not line:  # skip empty lines
            continue
        # Remove remaining non-printable chars.
        line = ''.join([x for x in line if x in string.printable])
        # Render errors as a Markdown bullet list.
        if line.startswith("Error:"):
            line = "\n* " + line
        converted_lines.append(str(line) + '\n')
    with open(output_filename, 'w') as fp:
        fp.writelines(converted_lines)
def cmor_checker(dataset, table='CMIP6_CV', variable=None, output_filename=None):
    """Run the CMOR PrePARE checker on ``dataset`` against a CMIP6 table.

    :param dataset: NetCDF file to check.
    :param table: CMIP6 table name (without '.json').
    :param variable: optional variable name passed to PrePARE --variable.
    :param output_filename: report path; defaults to 'out.txt'.
    :returns: True when PrePARE exits 0, False when it reports errors.
    """
    output_filename = output_filename or 'out.txt'
    try:
        cmd = ['PrePARE']
        if variable:
            cmd.extend(['--variable', variable])
        table_path = os.path.join(cmor_tables_path(), table + '.json')
        cmd.append(table_path)
        cmd.append(dataset)
        LOGGER.debug("run command: %s", cmd)
        # Suppress the UVCDAT telemetry prompt for the subprocess.
        os.environ['UVCDAT_ANONYMOUS_LOG'] = 'no'
        output = check_output(cmd, stderr=subprocess.STDOUT)
        cmor_dump_output(dataset, True, output, output_filename)
    except CalledProcessError as err:
        # Non-zero exit: write the failure report and signal False.
        LOGGER.warn("CMOR checker failed on dataset: %s", os.path.basename(dataset))
        cmor_dump_output(dataset, False, err.output, output_filename)
        return False
    return True
def hdh_cf_check(filename, version="auto"):
    """Run the DKRZ CF conformance checker (dkrz-cf-checker) on *filename*.

    :param filename: dataset path; normalised through ``fix_filename``.
    :param version: CF conventions version, or "auto" to let the checker pick.
    :returns: checker output as text, or an "Error: ..." string on failure.
    """
    # TODO: maybe use local file path
    filename = os.path.abspath(fix_filename(filename))
    cmd = ["dkrz-cf-checker", filename]
    if version != "auto":
        cmd.extend(['-C', version])
    try:
        output = check_output(cmd, stderr=subprocess.STDOUT)
    except CalledProcessError as err:
        LOGGER.exception("cfchecks failed!")
        return "Error: cfchecks failed: {0}. Output: {0.output}".format(err)
    # Bug fix: check_output returns bytes on Python 3, but the error branch
    # returns str and callers write the result to a text-mode file — decode
    # so the success path is consistent and writable.
    if not isinstance(output, str):
        output = output.decode('utf-8')
    return output
def hdh_qa_checker(filename, project, qa_home=None):
    """Run the DKRZ QA checker (qa-dkrz) on *filename* for *project*.

    :param filename: dataset path; normalised through ``fix_filename``.
    :param project: project rules to check against (e.g. CORDEX, CMIP5).
    :param qa_home: working directory for the checker; defaults to ./work.
    :returns: (logfile, results_path) — summary log and check_logs directory.
    :raises Exception: when the checker fails or produces no results/log.
    """
    # TODO: maybe use local file path
    filename = os.path.abspath(fix_filename(filename))
    # create qa_home
    # qa_home = os.path.join(config.cache_path(), "qa_dkrz")
    if not qa_home:
        qa_home = os.path.abspath("./work")
        make_dirs(qa_home)
    cmd = ["qa-dkrz", "-P", project]
    if qa_home:
        cmd.append("--work=" + qa_home)
    cmd.append(filename)
    try:
        check_output(cmd, stderr=subprocess.STDOUT)
    except CalledProcessError as err:
        LOGGER.exception("qa checker failed!")
        msg = "qa checker failed: {0}. Output: {0.output}".format(err)
        raise Exception(msg)
    # qa-dkrz writes its reports relative to the current working directory.
    results_path = os.path.join("QA_Results", "check_logs")
    if not os.path.isdir(results_path):
        raise Exception("QA results are missing.")
    # output logfile
    logs = glob.glob(os.path.join(results_path, "*.log"))
    if not logs:
        # Some versions write hidden (dot-prefixed) log files.
        logs = glob.glob(os.path.join(results_path, ".*.log"))
    if logs:
        # Hard-link the first log under a .txt name so WPS serves it as text.
        logfile = logs[0][:-4] + '.txt'
        os.link(logs[0], logfile)
    else:
        raise Exception("could not find log file.")
    return logfile, results_path
| {"/hummingbird/processes/wps_hdh_qachecker.py": ["/hummingbird/processing.py"], "/tests/test_wps_hdh.py": ["/hummingbird/processes/wps_hdh_cfchecker.py"], "/hummingbird/processes/wps_hdh_cfchecker.py": ["/hummingbird/processing.py"]} |
54,336 | kinow/hummingbird | refs/heads/master | /hummingbird/processes/wps_hdh_cfchecker.py | from pywps import Process
from pywps import LiteralInput
from pywps import ComplexInput, ComplexOutput
from pywps import Format
from pywps.app.Common import Metadata
import logging
LOGGER = logging.getLogger("PYWPS")
class HDHCFChecker(Process):
    """PyWPS process wrapping the DKRZ CF conventions checker."""
    def __init__(self):
        inputs = [
            ComplexInput('dataset', 'NetCDF File',
                         abstract='You may provide a URL or upload a NetCDF file.',
                         metadata=[Metadata('Info')],
                         min_occurs=1,
                         max_occurs=1024,
                         supported_formats=[Format('application/x-netcdf')]),
            LiteralInput('cf_version', 'Check against CF version',
                         data_type='string',
                         abstract="Version of CF conventions that the NetCDF file should be check against."
                                  " Use auto to auto-detect the CF version.",
                         min_occurs=1,
                         max_occurs=1,
                         default='auto',
                         allowed_values=["auto", "1.6", "1.5", "1.4"]),
        ]
        outputs = [
            ComplexOutput('output', 'CF Checker Report',
                          abstract="Summary of the CF compliance check",
                          as_reference=True,
                          supported_formats=[Format('text/plain')]),
        ]
        super(HDHCFChecker, self).__init__(
            self._handler,
            identifier="qa_cfchecker",
            title="CF Checker by DKRZ",
            version="0.6.3",
            abstract="The NetCDF Climate Forcast Conventions compliance checker by DKRZ."
                     " This process allows you to run the compliance checker to check that"
                     " the contents of a NetCDF file comply with the Climate and Forecasts (CF) Metadata Convention."
                     " The CF Conformance checker applies to conventions 1.4 -1.7draft."
                     " Development and maintenance for the CF-checker is done by the"
                     " German Climate Computing Centre (DKRZ)."
                     " If you have suggestions for improvement then please contact"
                     " Heinz-Dieter Hollweg at DKRZ (hollweg@dkrz.de).",
            metadata=[
                Metadata('Birdhouse', 'http://bird-house.github.io/'),
                Metadata('User Guide', 'http://birdhouse-hummingbird.readthedocs.io/en/latest/'),
                Metadata('CF Conventions', 'http://cfconventions.org/'),
                Metadata('CF Checker Documentation', 'http://qa-dkrz.readthedocs.io/en/latest/'),
                Metadata('Conda Package', 'http://anaconda.org/birdhouse/qa-dkrz'),
                Metadata('GitHub', 'https://github.com/IS-ENES-Data/QA-DKRZ'),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )
    def _handler(self, request, response):
        """Check every input dataset and append the reports to one text file.

        NOTE(review): hdh_cf_check may return bytes on Python 3; writing that
        to a text-mode file would raise TypeError — verify against processing.py.
        """
        from hummingbird.processing import hdh_cf_check
        response.update_status("starting cfchecker ...", 0)
        # TODO: iterate input files ... run parallel
        # TODO: generate html report with links to cfchecker output ...
        datasets = [dataset.file for dataset in request.inputs['dataset']]
        max_count = len(datasets)
        step = 100.0 / max_count
        for idx, dataset in enumerate(datasets):
            cf_report = hdh_cf_check(dataset, version=request.inputs['cf_version'][0].data)
            # Append mode: all datasets share a single cumulative report file.
            with open('cfchecker_output.txt', 'a') as fp:
                response.outputs['output'].file = fp.name
                fp.write(cf_report)
            response.update_status("cfchecker: %d/%d" % (idx, max_count), int(idx * step))
        response.update_status("cfchecker done.", 100)
        return response
54,349 | RayaneDev/JVCTopicLive | refs/heads/main | /app.py | from flask import Flask, request, render_template
from TopaxAPI import get_html, get_last_link, get_new_posts
import bs4 as bs
import vars
import json
app = Flask(__name__, static_url_path='/static')
@app.route("/")
def index():
return render_template('index.html')
@app.route("/watchTopic/", methods=['GET'])
def watch_topic() :
url = request.args.to_dict(flat=False)['url'][0]
vars.topax = url
vars.data = get_html(vars.topax)
vars.soup = bs.BeautifulSoup(vars.data,'lxml')
# On va à la dernière page
link = get_last_link()
if link != None :
vars.topax = link
vars.data = get_html(vars.topax)
vars.soup = bs.BeautifulSoup(vars.data,'lxml')
return json.dumps({'response' : url})
@app.route("/getLastPosts/", methods=['GET'])
def get_last_posts() :
url = request.args.to_dict(flat=False)['url'][0]
posts = get_new_posts()
return json.dumps({'posts' : posts}) | {"/app.py": ["/TopaxAPI.py", "/vars.py"], "/TopaxAPI.py": ["/vars.py"]} |
54,350 | RayaneDev/JVCTopicLive | refs/heads/main | /TopaxAPI.py | import urllib.request
import bs4 as bs
import vars
def get_html(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    The response is closed deterministically via a context manager (the
    previous version never closed the connection object).
    """
    with urllib.request.urlopen(url) as response:
        return response.read().decode('utf-8')
def get_last_link() :
    """Return the absolute URL of the topic's last page, or None.

    Reads the pagination widget from the shared ``vars.soup`` parse. The
    span list is reversed so the last page sits first; when the final span
    is the '»' arrow, the page number is the next element instead.
    """
    link = vars.soup.find_all('div', {'class': 'bloc-liste-num-page'})[0].find_all('span')[::-1]
    index = 1 if link[0].text == '»' else 0
    # A span without an <a> means we are already on the last page.
    if len(link) > 1 and link[index].find('a') != None :
        link = "https://www.jeuxvideo.com" + link[index].find('a')['href']
        return link
    return None
def get_new_posts():
    """Collect posts newer than ``vars.lastPost`` from the current page.

    Returns a list of [avatar srcset, author, date, content-HTML] entries.
    First call only primes ``vars.lastPost`` with the newest post id and
    returns an empty list. When a full page (20 posts) is seen, advances
    ``vars.topax`` to the newest page and re-parses it for the next poll.
    """
    posts = []
    if vars.lastPost == False :
        # First poll: remember the newest post, deliver nothing yet.
        vars.lastPost = vars.soup.find_all('div', {'class' : 'bloc-message-forum'})[-1]['data-id']
    else :
        messages = vars.soup.find_all('div', {'class' : 'bloc-message-forum'})
        record = False
        for i in range (0, len(messages)) :
            # record flips True once the previously-seen post is passed;
            # newPage means every post on a freshly-loaded page is new.
            if record or vars.newPage :
                post = []
                #post = messages[i].find_all('div', {'class' : 'txt-msg'})[0]
                post.append(messages[i].find('img', {'class' : 'user-avatar-msg'})['data-srcset'])
                post.append(messages[i].find('span', {'class' : 'bloc-pseudo-msg'}).text.rstrip().strip())
                post.append(messages[i].find('div', {'class' : 'bloc-date-msg'}).find('span').text)
                post.append(str(messages[i].find('div', {'class' : 'bloc-contenu'})))
                posts.append(post)
                vars.lastPost = messages[i]['data-id']
            if messages[i]['data-id'] == vars.lastPost :
                record = True
        vars.newPage = False
        # A full page of 20 posts suggests a newer page may exist.
        if len(messages) == 20 :
            link = get_last_link()
            if link != None :
                vars.topax = link
                print(vars.topax)
                vars.newPage = True
                vars.data = get_html(vars.topax)
                vars.soup = bs.BeautifulSoup(vars.data,'lxml')
    return posts
54,351 | RayaneDev/JVCTopicLive | refs/heads/main | /vars.py | topax = None
# Module-level shared state for the currently watched JVC topic.
soup = None  # BeautifulSoup parse of the current page
data = None  # raw HTML of the current page
lastPost = False  # id of the last post already delivered (False until primed)
newPage = False  # True right after paging forward, before the next scan
54,352 | RayaneDev/JVCTopicLive | refs/heads/main | /appCLI.py | import urllib.request
import bs4 as bs
import threading
topax = "https://www.jeuxvideo.com/forums/42-51-67196854-1-0-1-0-direct-le-senat-examine-la-loi-pass-sanitaire.htm"
def get_html(url):
    """Download *url* and return its body decoded as UTF-8.

    Bug fix: the previous version ignored ``url`` and always fetched the
    module-global ``topax`` address (every caller happened to pass ``topax``,
    which hid the bug). The connection is now also closed deterministically.
    """
    with urllib.request.urlopen(url) as response:
        return response.read().decode('utf-8')
def get_last_link() :
    """Return the absolute URL of the topic's last page, or None.

    Reads the pagination widget from the module-global ``soup`` parse. The
    span list is reversed so the last page sits first; when the final span
    is the '»' arrow, the page number is the next element instead.
    """
    link = soup.find_all('div', {'class': 'bloc-liste-num-page'})[0].find_all('span')[::-1]
    index = 1 if link[0].text == '»' else 0
    # A span without an <a> means we are already on the last page.
    if len(link) > 1 and link[index].find('a') != None :
        link = "https://www.jeuxvideo.com" + link[index].find('a')['href']
        return link
    return None
# Startup: fetch the topic and jump straight to its last page so polling
# begins at the newest posts. NOTE(review): this performs network I/O at
# import time.
data = get_html(topax)
# Go to the last page.
soup = bs.BeautifulSoup(data,'lxml')
link = get_last_link()
if link != None :
    topax = link
    data = get_html(topax)
delay = 5.0 # polling delay in seconds
lastPost = False # id of the last post already delivered
posts = []
newPage = False
def get_new_posts():
    """Poll the watched topic every ``delay`` seconds and print new posts.

    Re-schedules itself with threading.Timer. First call only primes
    ``lastPost``; later calls print and accumulate every post newer than
    it, advancing to the topic's latest page when a full page is seen.
    """
    global lastPost, data, bs, soup, newPage, topax
    # Schedule the next poll immediately so slow parsing doesn't drift it.
    threading.Timer(delay, get_new_posts).start()
    if lastPost == False :
        # First poll: remember the newest post, print nothing yet.
        lastPost = soup.find_all('div', {'class' : 'bloc-message-forum'})[-1]['data-id']
    else :
        messages = soup.find_all('div', {'class' : 'bloc-message-forum'})
        record = False
        for i in range (0, len(messages)) :
            # record flips True once the previously-seen post is passed;
            # newPage means every post on a freshly-loaded page is new.
            if record or newPage :
                post = messages[i].find_all('div', {'class' : 'txt-msg'})[0].find_all('p')[-1].text
                posts.append(post)
                lastPost = messages[i]['data-id']
                print (post, "\n===========================================================")
            if messages[i]['data-id'] == lastPost :
                record = True
        newPage = False
        # A full page of 20 posts suggests a newer page may exist.
        if len(messages) == 20 :
            link = get_last_link()
            if link != None :
                topax = link
                print(topax)
                newPage = True
        # Refresh the page for the next poll.
        data = get_html(topax)
        soup = bs.BeautifulSoup(data,'lxml')
get_new_posts()
| {"/app.py": ["/TopaxAPI.py", "/vars.py"], "/TopaxAPI.py": ["/vars.py"]} |
54,357 | apl-ocean-engineering/yolo_4channel | refs/heads/master | /ProcessImageDirectory.py | #!/usr/bin/env python3
# COPY THIS SCRIPT TO THE LOCATION OF "IMAGES" FOLDER CONTAINING ALL IMAGES
# Note: same location as labelme_manipulation script
import glob
import os
import cv2
import numpy
from shutil import copy2
from natsort import natsorted # deals with python sorting weirdness (images with varying # of digits at end)
# Image extension handled throughout (also used to derive .json sidecar names).
FILE_TYPE = ".png"
# NOTE(review): IMAGE_DIR is defined but cwd below uses os.getcwd() — confirm
# which base directory is intended.
IMAGE_DIR = "/home/tanner/Cinderblock/images"
# cwd = IMAGE_DIR
cwd = os.getcwd()
# TODO:
# enable command line input to specify processing technique
def saveResult(result_image, image_path):
    """Save a processed image under processed_images/, mirroring the source path.

    Also copies any .json annotation sharing the image's basename.
    Assumes image_path is RELATIVE to cwd — an absolute path would produce a
    malformed result_path from the string concatenation below; TODO confirm.
    """
    fname = os.path.basename(image_path)
    # print(fname)
    result_path = cwd + "/processed_images/" + image_path
    # Strip the filename to get the destination directory.
    dir_path = result_path[:-len(fname)]
    # print(dir_path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path, exist_ok = True)
    # print("RESULT PATH: " + result_path)
    # print("RESULT SHAPE: " + str(result_image.shape))
    cv2.imwrite(result_path, result_image)
    # hacked-together method of getting .json files copied over
    # NOTE(review): the bare except below swallows ALL errors, not just a
    # missing .json file — consider narrowing to FileNotFoundError.
    try:
        json_path = image_path[:-len(FILE_TYPE)]
        json_path = json_path + ".json"
        save_json_path = result_path[:-len(FILE_TYPE)]
        save_json_path = save_json_path + ".json"
        # print("COPYING " + json_path + " TO " + save_json_path)
        copy2(json_path, save_json_path)
    except:
        print("no .json file associated with current image")
def processDir_TempDisparity(fnames):
    """Append a temporal-difference 4th channel to each image in a directory.

    For every image after the first, computes the SSIM difference map against
    the previous frame, scales it to uint8 and stacks it as an extra channel,
    then saves via saveResult(). The first image is skipped (no predecessor).
    """
    # need to process one directory at a time, skip first image
    # requires passing current directory list of images
    # idea is that each image will have a 4th channel of the differences between it and the previous image
    # therefore first image in directory will have nothing in the 4th channel
    # from skimage.measure import compare_ssim
    from skimage.metrics import structural_similarity as compare_ssim
    import imutils
    firstImage = True
    for i in range(len(fnames)):
        if firstImage:
            firstImage = False
            pass
        else:
            # load images in grayscale
            img1 = cv2.imread(fnames[i], 0)
            img2 = cv2.imread(fnames[i-1], 0) # comparing to previous image
            # compute difference via SSIM, make image we can add to 4th channel
            (score, diff) = compare_ssim(img1, img2, full=True)
            diff = (diff * 255).astype("uint8")
            # cv2.imshow("diff", diff)
            # cv2.waitKey(10)
            # Re-read unchanged (flag -1) so the original channels are kept.
            original_img1 = cv2.imread(fnames[i], -1)
            result_image = numpy.dstack((original_img1, diff))
            image_path = fnames[i]
            saveResult(result_image, image_path)
def processDir_StereoDisparity(fnames):
    """Compute and display left- and right-based stereo disparity maps for
    matched left/right frame pairs of a subset directory.

    fnames is any file list inside the subset; the subset root is derived
    from fnames[0] and its left/ and right/ children are scanned.
    """
    subset_path = os.path.dirname(os.path.dirname(fnames[0]))
    left_fnames = natsorted(glob.glob(subset_path + '/left/*' + FILE_TYPE))
    right_fnames = natsorted(glob.glob(subset_path + '/right/*' + FILE_TYPE))
    # the block matcher carries no per-frame state, so create it once
    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    # zip guards against one side having fewer frames than the other
    # (the original indexed right_fnames by left's index -> IndexError risk)
    for left_name, right_name in zip(left_fnames, right_fnames):
        # frames match when the last 3 digits before the extension agree
        if left_name[-(3 + len(FILE_TYPE)):] != right_name[-(3 + len(FILE_TYPE)):]:
            continue
        left_img = cv2.imread(left_name, 0)
        right_img = cv2.imread(right_name, 0)
        left_disparity = stereo.compute(left_img, right_img).astype("uint8")
        # mirror both frames so the matcher can produce a right-based map
        left_flipped = cv2.flip(left_img, 1)
        right_flipped = cv2.flip(right_img, 1)
        right_disparity = stereo.compute(right_flipped, left_flipped).astype("uint8")
        cv2.imshow("left-based", left_disparity)
        cv2.imshow("right-based", right_disparity)
        cv2.waitKey(10)
def processDir_Batch(fnames):
    """For each image with at least 4 successors, stack the grayscale sum of
    the current + next 4 frames into a 4th channel and save the result.

    Images within 4 of the end of the directory are skipped, since they have
    no complete 5-frame batch.
    """
    # iterate only indices with 4 subsequent images available, rather than
    # relying on a bare except to mask the IndexError (which also hid any
    # real cv2/numpy failures)
    for i in range(max(0, len(fnames) - 4)):
        print("new batch")
        image_path = fnames[i]
        # load the 5-frame grayscale window
        batch = [cv2.imread(fnames[i + k], 0) for k in range(5)]
        original_img1 = cv2.imread(fnames[i], 1)
        # NOTE(review): uint8 addition wraps on overflow; confirm whether a
        # saturating or scaled sum was intended
        img_sum = batch[0] + batch[1] + batch[2] + batch[3] + batch[4]
        result_image = numpy.dstack((original_img1, img_sum))
        print("IMAGE PATH (passed to save result): " + image_path)
        print("saving result!")
        saveResult(result_image, image_path)
    # note: this will result in running darknet on images with different
    # formatting: some will have a 4th channel, others not; could be fixed
    # by giving trailing images a 4th channel of zeros
def processDir_Canny(fnames):
    """Append a Canny edge map as a 4th channel to every image and save it."""
    for image_path in fnames:
        source = cv2.imread(image_path, -1)
        # edge map (thresholds 100/200) stacked as the extra channel
        edge_map = cv2.Canny(source, 100, 200).astype('uint8')
        stacked = numpy.dstack((source, edge_map))
        saveResult(stacked, image_path)
def processDir_MOG(fnames):
    """Append a MOG2 foreground mask as a 4th channel to every image and save.

    The background subtractor is created once, before the loop, so its
    background model accumulates across the frame sequence; the original
    created it inside the loop, resetting the model on every frame and
    making the mask meaningless.
    """
    backSub = cv2.createBackgroundSubtractorMOG2()
    for fname in fnames:
        image = cv2.imread(fname)
        fgMask = backSub.apply(image)
        print("FGMASK SHAPE: " + str(fgMask.shape))
        cv2.imshow('Frame', image)
        cv2.imshow('FG Mask', fgMask)
        fgMask = fgMask.astype('uint8')
        cv2.waitKey(10)
        result_image = numpy.dstack((image, fgMask))
        saveResult(result_image, fname)
def getAllFilePathList():
    """Return relative paths of every FILE_TYPE image under cwd, excluding
    anything already inside the processed_images output tree."""
    candidates = glob.glob(cwd + '/**/*' + FILE_TYPE, recursive=True)
    files = [os.path.relpath(p) for p in candidates
             if "processed_images" not in p]
    print(files)
    return files
def getDirectoryList():
    """Placeholder: intended to return the list of image directories.

    Currently unimplemented and always returns None -- do not call yet.
    """
    directories = None
    return directories
if __name__ == "__main__":
    # Walk images/<base>/<sub> and run the selected processing pass
    # (currently stereo disparity) on each sub-directory's frames.
    #files = getAllFilePathList()
    base_dirs = sorted(glob.glob("images/*"))
    for base_dir in base_dirs:
        print("DIR: " + base_dir)
        sub_dirs = sorted(glob.glob(base_dir + "/*"))
        for sub_dir in sub_dirs:
            print("SUB DIR: " + sub_dir)
            # natsorted keeps frame order sane for varying digit counts
            fnames = natsorted(glob.glob(sub_dir + "/*" + FILE_TYPE, recursive=True))
            # process current directory and save results
            # processDir_Batch(fnames)
            processDir_StereoDisparity(fnames)
    # Older, per-file processing flow kept for reference:
    # if not os.path.exists('processed_images'):
    #     os.mkdir('processed_images')
    #
    # for image in files:
    #     cur_image = cv2.imread(image)
    #     fname = os.path.basename(image)
    #     # print(fname)
    #     result_image = processImage_MOG(cur_image)
    #     # save resulting image
    #     result_path = cwd + "/processed_images/" + image
    #     dir_path = result_path[:-len(fname)]
    #     print(dir_path)
    #     print
    #     if not os.path.exists(dir_path):
    #         os.makedirs(dir_path, exist_ok = True)
    #
    #     cv2.imwrite(result_path, result_image)
| {"/BG_Subtraction.py": ["/ProcessImageDirectory.py"]} |
54,358 | apl-ocean-engineering/yolo_4channel | refs/heads/master | /BG_Subtraction.py | #!/usr/bin/env python3
# TODO:
# - try out different methods of background subtraction
#   - current: Mixture of Gaussians
#   - constantly update background model throughout all pictures
#   - reset background every N number of frames (since the camera is moving)
# - visualize how they can be used to clean up images
#   - subtract the background in order to have just the cinderblock in the picture
# nabbed from https://docs.opencv.org/master/d1/dc5/tutorial_background_subtraction.html
from __future__ import print_function
from ProcessImageDirectory import *
import cv2 as cv
import ProcessImageDirectory
backSub = cv.createBackgroundSubtractorMOG2()
# backSub = cv.createBackgroundSubtractorKNN()
# fix: the helper is named getAllFilePathList; getFilePathList does not
# exist in ProcessImageDirectory and raised a NameError
files = getAllFilePathList()
for file in files:
    # use the cv alias consistently (cv2 only leaked in via the star import)
    frame = cv.imread(file)
    fgMask = backSub.apply(frame)
    print("IMG SHAPE: " + str(frame.shape))
    print("MASK SHAPE: " + str(fgMask.shape))
    cv.imshow('Frame', frame)
    cv.imshow('FG Mask', fgMask)
    # waitKey returns an int key code, so compare against ord('q'), not 'q'
    keyboard = cv.waitKey(30)
    if keyboard == ord('q') or keyboard == 27:
        break
| {"/BG_Subtraction.py": ["/ProcessImageDirectory.py"]} |
54,360 | phanivallur/Realty | refs/heads/master | /realty.py | # -*- coding: utf-8 -*-
import scrapy
from scrapy_splash import SplashRequest
from bs4 import BeautifulSoup
from locality import Locality
import json
import operator
class RealtySpider(scrapy.Spider):
    """Scrapes the 99acres property-rate tables for Hyderabad (rendered via
    Splash) and yields localities sorted by area name, numbered from 1."""
    name = 'realty'
    allowed_domains = ['99acres.com']
    #start_urls = ['https://www.99acres.com/property-rates-and-price-trends-in-hyderabad']

    def start_requests(self):
        # Splash renders the JS-driven page before parse() sees it
        yield SplashRequest(
            url='https://www.99acres.com/property-rates-and-price-trends-in-hyderabad',
            callback=self.parse,
        )

    def parse(self, response):
        """Extract every non-empty cell of every price-trend table row and
        yield the resulting localities, sorted by area name."""
        soup = BeautifulSoup(response.text, 'lxml')
        data = []
        for table in soup.find_all("table", {"class": "prTble"}):
            for row in table.find_all("tr"):
                cols = [cell.text.strip() for cell in row.find_all("td")]
                data.append([value for value in cols if value])
        resultant_objects = self.create_objects(data)
        sorted_data = sorted(resultant_objects, key=operator.attrgetter('area'))
        # enumerate replaces the hand-rolled counter
        for index, obj in enumerate(sorted_data, start=1):
            yield {str(index): obj.__str__()}

    def create_objects(self, input):
        """Convert raw table rows into Locality objects.

        Rows with fewer than 7 columns are skipped: the original accepted
        any non-empty row and would raise IndexError indexing obj[6].
        ('input' shadows the builtin but is kept for interface stability.)
        """
        return [Locality(row[0], row[1], row[2], row[3], row[4], row[5], row[6])
                for row in input if len(row) >= 7]
| {"/realty.py": ["/locality.py"]} |
class Locality:
    """Value object for one 99acres locality row: area name, per-sqft price,
    quarter-over-quarter change, trend, and 1/2/3-BHK rents."""

    def __init__(self, area, sqft_price, qbyq, trends, onebhk, twobhk, threebhk):
        self.area = area
        self.sqft_price = sqft_price
        self.qbyq = qbyq
        self.trends = trends
        self.onebhk = onebhk
        self.twobhk = twobhk
        self.threebhk = threebhk

    def __str__(self):
        # same comma-space rendering as before, built with join
        fields = (self.area, self.sqft_price, self.qbyq, self.trends,
                  self.onebhk, self.twobhk, self.threebhk)
        return ", ".join(fields)
| {"/realty.py": ["/locality.py"]} |
54,367 | yohoc/busstation | refs/heads/master | /rm/bus_stop.py | # -*- coding: utf-8 -*-
# Scratch script: queries the AMap bus-line API and a bus-tracking endpoint
# for Guangzhou route data and dumps both JSON responses to disk.
# NOTE(review): performs network requests at import time; API keys, cookies
# and signed timestamps are hard-coded and have presumably long expired.
import requests
import json
# AMap bus-line search for route "540路" in Guangzhou (key/signature baked in)
url = 'http://restapi.amap.com/v3/bus/linename?offset=20&city=guangzhou&keywords=540%E8%B7%AF&language=zh&key=1d55ec451057366bda9ebe2352f82e6f&page=1&extensions=all&scode=e23e9e96e58467fb7f7daa5687288238&ts=1481770522211'
#url = 'http://restapi.amap.com/v3/bus/linename'
# Headers imitating the AMap iOS SDK client
headers = {
    'Host': 'restapi.amap.com',
    'Accept': '*/*',
    'logversion': '2.0',
    'platinfo': 'product=sea&platform=iOS&sdkversion=3.1.0',
    'Connection': 'keep-alive',
    'x-info': 'H4sIAAAAAAAAAwGwAU/+Nlphl4SypRGFsUv7zEfrT/JyrwVqoDMDGndMDCgcTMAPxAnK7IRFwm9Ao03rxYo8yqkNaLdbaV1f+ISe1JCn9SNxhvhl5rTLBwn+6jhuvsU3C76Nb67CcDjug7CCMU7L8w4XyHwDhaskQINdaTrVaa51G07st3qXp+5lGAeEB6OUJb6pmbUBKDVzho+vOSOqdkI64/Wu33giLtnQUA2Vis47/HYOVoATYCwyjD9rzZrHFXn10Cero5ASxJa3zihpp9mPa1L8cHKw9PvPPZ5Sruuj6xk8TrgqRw9dRNDHGNbGWAZYebWAYK9Pkmfn/bJvchQ22hzd/jDCAJxQQ6gvKg9ZfwZl81p7SCrRXX+MSAmn2xTVFVYsIA1l/CYzJ0McTQC8oVI82tL0tKW8ClK5T5NlymIT9KK/yHrxmjOty/jC0OpDC6dYC2WJJqmFBEoz3aKBVnG9WHDWQtCf2Cj7TKsMnuUdbYnzzFeLmTwkbcyr2Z7nT7FExdO3GipY9HvAlcdxKrPTpLjUTDORBaW8S0xbeItUoNvPJYBv+FJOGLMJdxPbgb4r5+RKfBWjOztlLMgtqbABAAA=',
    'User-Agent': 'AMAP_SDK_iOS_Search_3.1.0',
    'Accept-Language': 'zh-cn',
    'Accept-Encoding': 'gzip,deflate',
    'Connection': 'keep-alive'
}
# Headers imitating the xxt bus-tracking iOS client (note: built but the
# request below sends `headers`, not these -- likely an oversight)
headers_run = {
    'Host': 'nxxtapi.gzyyjt.net:9009',
    'Content-Type': 'application/json',
    'Accept': '*/*',
    'Connection': 'keep-alive',
    'Cookie': 'DYNSRV=s1',
    'User-Agent': 'src_201510_iOS_unionxxt/3.0.3 (iPhone; iOS 9.3.3; Scale/2.00)',
    'Accept-Language': 'zh-Hans-CN;q=1',
    'Accept-Encoding': 'gzip, deflate',
    'Content-Length': '422'
}
# Live-bus-position endpoint with a pre-signed payload for route 1110
url_getrun = 'http://nxxtapi.gzyyjt.net:9009/xxt_api/bus/runbus/getByRouteAndDirection'
payload = {"sign":"BFB899F1CF926D2509A3CE8F6AC46BE3CC1ED80B","appid":"xxtGci01","timestamp":"1481787958985","data":"{\"routeId\":\"1110\",\"direction\":\"0\"}","reqpara":"{\"devtype\":1,\"speed\":\"0.0\",\"direc\":\"0.0\",\"versiontype\":4,\"uid\":14042,\"reserved\":\"iOS\",\"gpstime\":\"1481783578141\",\"devno\":\"BD4DB42F-1111-4EA3-8BF7-82A032B10D71\",\"version\":\"3.0.3\",\"lng\":113.3251610781745,\"lat\":23.13190629492595}"}
s = requests.Session()
# note: `re` shadows the stdlib regex module name within this script
re = s.get(url, headers=headers, stream=True)
re1 = s.post(url_getrun,stream=True, headers=headers, data=payload)
# dump the AMap line response to bus.json
contents = re.json()
jsObj = json.dumps(contents,ensure_ascii=False ,indent=2)
f = open('bus.json', 'wb')
f.write(jsObj.encode('utf-8'))
f.close()
# dump the live-bus response to bus_run.json
contents_run = re1.json()
jsObj = json.dumps(contents_run,ensure_ascii=False ,indent=2)
f = open('bus_run.json', 'wb')
f.write(jsObj.encode('utf-8'))
f.close()
| {"/app/main/forms.py": ["/app/models.py"]} |
54,368 | yohoc/busstation | refs/heads/master | /rm/bus_data.py | # -*- coding: utf-8 -*-
import requests
import json
import time
import sqlite3
# 导入:
from sqlalchemy import Column, String, create_engine, Integer
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Headers that mimic the WeChat in-app browser client (with session cookies)
headers = {
    'Host': 'wxbus.gzyyjt.net',
    'Upgrade-Insecure-Requests': '1',
    'Connection': 'keep-alive',
    'Accept-Encoding': 'gzip, deflate',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Cookie': 'JSESSIONID=8F70E1A8D269CD7BD1925AC6E4173DFF; route=fe9b13b33d88398957ee445b97555283; gzhUser=gh_342e92a92760; openId=ouz9Msz1ISCr3XuDLljot-DDwFbo; realOpenId=ouz9Msz1ISCr3XuDLljot-DDwFbo; WBSRV=s3',
    'Accept-Language': 'zh-cn',
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_1_1 like Mac OS X) AppleWebKit/602.2.14 (KHTML, like Gecko) Mobile/14B100 MicroMessenger/6.5.1 NetType/WIFI Language/zh_CN'
}
# JSON post-processing helper: returns the trimmed payload
def json_data(jsdata):
    """Strip transport fields from a bus-route JSON payload and collapse the
    stop list into a flat "station" name list.

    Mutates and returns the same dict: removes "c", "lt", "ft" and "l", and
    adds "station" = [stop["n"] for each stop in jsdata["l"]].
    """
    # pop with a default: a payload missing these keys no longer raises
    # KeyError (the original used `del`, which did)
    for key in ("c", "lt", "ft"):
        jsdata.pop(key, None)
    jsdata["station"] = [stop["n"] for stop in jsdata.pop("l", [])]
    return jsdata
def login(headers):
    """Create a requests session and hit the WeChat auth URL so that later
    calls in the same session carry the authentication cookie.

    Returns the authenticated requests.Session.
    """
    # identity-verification endpoint (openId/gzhUser baked into the query)
    verify_url = 'http://wxbus.gzyyjt.net/wei-bus-app/route?nickName=&gzhUser=gh_342e92a92760&openId=ouz9Msz1ISCr3XuDLljot-DDwFbo'
    session = requests.Session()
    session.get(verify_url, headers=headers)
    return session
# Declarative base class for the ORM models:
Base = declarative_base()
# Bus route model: one row per (route, direction) with its stop list
class Bus(Base):
    # table name:
    __tablename__ = 'bus'
    # table structure:
    id = Column(Integer, primary_key=True)
    bus_num = Column(String(20))    # display name, e.g. u'540路'
    route_id = Column(String(5))    # numeric route id used by the API
    direction = Column(String(1))   # '0' or '1'
    bus = Column(String(2000))      # stringified list of stop names
# Database initialisation helper
def init_db():
    """Create all mapped tables on the module-level engine (idempotent)."""
    Base.metadata.create_all(engine)
# Scrape function: fetch and persist one route's stop lists (Python 2)
def grab(s, routeid, headers, bus_list_url, session):
    """Fetch the stop list for routeid in both directions and insert a Bus
    row per direction into the given SQLAlchemy session.

    Failures (bad JSON, missing keys, DB errors) are printed and skipped so
    the bulk scrape loop can continue.
    """
    # stop-list URLs for the two directions (0 and 1)
    bus_list_url0 = 'http://wxbus.gzyyjt.net/wei-bus-app/routeStation/getByRouteAndDirection/' + routeid + '/0/'
    bus_list_url1 = 'http://wxbus.gzyyjt.net/wei-bus-app/routeStation/getByRouteAndDirection/' + routeid + '/1/'
    # first direction
    print "grabing route id = " + routeid + ',direction = 0.'
    bus_list_res0 = s.get(bus_list_url0, headers=headers)
    try:
        jsObj_buslst0 = bus_list_res0.json()
        bus_info0 = json_data(jsObj_buslst0)
        # write to the database (stop list re-encoded from u'' repr form)
        new_bus0 = Bus(bus_num=bus_info0["rn"], route_id=routeid, direction='0', bus=str(bus_info0["station"]).replace('u\'','\'').decode('unicode-escape'))
        session.add(new_bus0)
        session.commit()
    except Exception as e:
        print e
    # second direction
    print "grabing route id = " + routeid + ',direction = 1.'
    bus_list_res1 = s.get(bus_list_url1, headers=headers)
    try:
        jsObj_buslst1 = bus_list_res1.json()
        bus_info1 = json_data(jsObj_buslst1)
        # write to the database
        new_bus1 = Bus(bus_num=bus_info1["rn"], route_id=routeid, direction='1', bus=str(bus_info1["station"]).replace('u\'','\'').decode('unicode-escape'))
        session.add(new_bus1)
        session.commit()
    except Exception as e:
        print e
# Initialise the database connection:
engine = create_engine('sqlite:///businfo.db')
# Create the DBSession factory:
DBSession = sessionmaker(bind=engine)
# Initialise the database (create tables if missing)
init_db()
session = DBSession()
bus_list_url = 'http://wxbus.gzyyjt.net/wei-bus-app/routeStation/getByRouteAndDirection/'
# Get a logged-in session carrying the auth cookie
s = login(headers)
# Scrape stop info for route ids in range(485, 1200)
# (the original comment said 1..1200; 485 was presumably a resume point)
for i in range(485,1200):
    grab(s, str(i), headers, bus_list_url, session)
    # wait 0.5 s between requests; the server refuses connections if hit faster
    time.sleep(0.5)
# Commit database changes (each grab() already commits)
#session.commit()
# Close the database session
session.close()
| {"/app/main/forms.py": ["/app/models.py"]} |
54,369 | yohoc/busstation | refs/heads/master | /app/main/forms.py | # -*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from wtforms import ValidationError
from ..models import Bus
class BusNum(Form):
    """Bus-number lookup form; validates that the route exists in the DB."""
    num = StringField(u'公交路数', validators=[Required()])
    submit = SubmitField(u'查询')
    def validate_num(self, filed):
        # WTForms inline validator ('filed' [sic] is the field object):
        # route names are stored upper-cased with a trailing u'路' suffix
        if not Bus.query.filter_by(bus_num= filed.data.upper() + u'路').first():
            raise ValidationError(u'无此公交线路')
| {"/app/main/forms.py": ["/app/models.py"]} |
54,370 | yohoc/busstation | refs/heads/master | /rm/bus_s.py | # -*- coding: utf-8 -*-
# Scratch script (Python 2): resolves bus line "B8" to a route id via the
# WeChat bus web app, then fetches and prints its live running-bus JSON.
import requests
import json
import time
# headers mimicking the WeChat in-app browser (with session cookies)
headers = {
    'Host': 'wxbus.gzyyjt.net',
    'Upgrade-Insecure-Requests': '1',
    'Connection': 'keep-alive',
    'Accept-Encoding': 'gzip, deflate',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Cookie': 'JSESSIONID=8F70E1A8D269CD7BD1925AC6E4173DFF; route=fe9b13b33d88398957ee445b97555283; gzhUser=gh_342e92a92760; openId=ouz9Msz1ISCr3XuDLljot-DDwFbo; realOpenId=ouz9Msz1ISCr3XuDLljot-DDwFbo; WBSRV=s3',
    'Accept-Language': 'zh-cn',
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_1_1 like Mac OS X) AppleWebKit/602.2.14 (KHTML, like Gecko) Mobile/14B100 MicroMessenger/6.5.1 NetType/WIFI Language/zh_CN'
}
# identity-verification URL (authenticates the session)
verify_url = 'http://wxbus.gzyyjt.net/wei-bus-app/route?nickName=&gzhUser=gh_342e92a92760&openId=ouz9Msz1ISCr3XuDLljot-DDwFbo'
# URL that resolves a bus-line name to its routeid
route_url = 'http://wxbus.gzyyjt.net/wei-bus-app/route/getByName'
route_payload = {'name':'B8'}
s = requests.Session()
res = s.get(verify_url, headers=headers)
route = s.post(route_url, data=route_payload, headers=headers)
route_id = route.json()[0]['i']
print 'route_id :' + route_id
#route_id = '2110'
bus_list_url = 'http://wxbus.gzyyjt.net/wei-bus-app/routeStation/getByRouteAndDirection/'+route_id+'/0'
bus_run_url = 'http://wxbus.gzyyjt.net/wei-bus-app/runBus/getByRouteAndDirection/'+route_id+'/0'
#bus_list_res = s.get(bus_list_url, headers=headers)
bus_list_res = s.get(bus_run_url, headers=headers)
#print bus_list_res.text
jsObj_buslst = bus_list_res.json()
# JSON post-processing (disabled experiment kept as a string literal below)
'''del jsObj_buslst["c"]
del jsObj_buslst["lt"]
del jsObj_buslst["ft"]
bus_station = []
for i in jsObj_buslst["l"]:
    bus_station.append(i["n"])
del jsObj_buslst["l"]
jsObj_buslst["station"] = bus_station'''
# t=1 regular line, t=2 short line, t=9 express line
#jsObj = json.dumps(dict(jsObj_buslst),ensure_ascii=False, indent=2)
jsObj = json.dumps(jsObj_buslst,ensure_ascii=False, indent=2)
#print type(jsObj_buslst)
#jsObj = json.loads(jsObj_buslst)
print jsObj.encode('utf-8')
#bus_run_res = s.get(bus_run_url, headers=headers)
#print bus_run_res.json()
'''for i in range(len(jsObj_buslst)):
    bl_num = len(jsObj_buslst[i]["bl"])
    bbl_num = len(jsObj_buslst[i]["bbl"])
    if bl_num != 0:
        print "有" + str(bl_num) +"辆车靠近第" + str(i + 1) + "个站点。"
    elif bbl_num != 0:
        print "有" + str(bbl_num) +"辆车靠近第" + str(i + 1) + "个站点。"
    else:
        print "无运行车辆信息。"'''
| {"/app/main/forms.py": ["/app/models.py"]} |
54,371 | yohoc/busstation | refs/heads/master | /rm/bus_station.py | # -*- coding: utf-8 -*-
from flask import Flask, render_template, redirect, url_for, flash
from flask_script import Manager, Shell
from flask_bootstrap import Bootstrap
from flask_wtf import Form
from flask_sqlalchemy import SQLAlchemy
from wtforms import StringField, SubmitField
from wtforms.validators import Required
import os
import requests
def make_shell_context():
    """Expose the app, DB handle and Bus model inside the Flask shell."""
    return {'app': app, 'db': db, 'Bus': Bus}
basedir = os.path.abspath(os.path.dirname(__file__))
# Flask app wired with Bootstrap, SQLAlchemy and Flask-Script's shell/manager
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'  # NOTE(review): hard-coded secret
app.config['SQLALCHEMY_DATABASE_URI'] = \
    'sqlite:///businfo.db'
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
manager = Manager(app)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
manager.add_command("shell", Shell(make_context=make_shell_context))
# Bus route model: one row per (route, direction) with its stop list
class Bus(db.Model):
    # table name:
    __tablename__ = 'businfo'
    # table structure:
    id = db.Column(db.Integer, primary_key=True)
    bus_num = db.Column(db.String(20))    # display name, e.g. u'540路'
    route_id = db.Column(db.String(5))    # numeric route id used by the API
    direction = db.Column(db.String(1))   # '0' or '1'
    bus = db.Column(db.String(2000))      # stringified list of stop names
    def __repr__(self):
        return '<Bus %r>' % self.bus_num
class BusNum(Form):
    """Bus-number lookup form (no DB validation in this standalone app)."""
    num = StringField(u'公交路数', validators=[Required()])
    submit = SubmitField(u'查询')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: bus-number form; valid submit redirects to the
    direction-0 running-bus status page."""
    form = BusNum()
    if form.validate_on_submit():
        # defensive check (Required() should already prevent empty data);
        # use the `is None` identity idiom instead of `== None`
        if form.num.data is None:
            flash('请输入车辆信息')
        num = form.num.data
        return redirect(url_for('runbus_status', busnum=num, direction='0'))
    return render_template('index.html', form=form)
@app.route('/runbus/<busnum>/<direction>')
def runbus_status(busnum, direction):
    """Render the stop list and live running-bus info for a route/direction.

    Stop names come from the local DB; live positions are fetched from the
    WeChat bus web app with a mimicked client session.
    """
    busnum = busnum + u'路'
    status = Bus.query.filter_by(bus_num=busnum, direction=direction).first().bus
    # the stop list is stored as a stringified Python list; un-stringify it
    buslist = status.replace('[','').replace(']','').replace('\'','').split(',')
    # headers mimicking the WeChat in-app browser (with session cookies)
    headers = {
        'Host': 'wxbus.gzyyjt.net',
        'Upgrade-Insecure-Requests': '1',
        'Connection': 'keep-alive',
        'Accept-Encoding': 'gzip, deflate',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Cookie': 'JSESSIONID=8F70E1A8D269CD7BD1925AC6E4173DFF; route=fe9b13b33d88398957ee445b97555283; gzhUser=gh_342e92a92760; openId=ouz9Msz1ISCr3XuDLljot-DDwFbo; realOpenId=ouz9Msz1ISCr3XuDLljot-DDwFbo; WBSRV=s3',
        'Accept-Language': 'zh-cn',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_1_1 like Mac OS X) AppleWebKit/602.2.14 (KHTML, like Gecko) Mobile/14B100 MicroMessenger/6.5.1 NetType/WIFI Language/zh_CN'
    }
    # identity-verification URL (authenticates the session)
    verify_url = 'http://wxbus.gzyyjt.net/wei-bus-app/route?nickName=&gzhUser=gh_342e92a92760&openId=ouz9Msz1ISCr3XuDLljot-DDwFbo'
    # URL that resolves a bus-line name to its routeid
    # NOTE(review): the lookup name here includes the u'路' suffix, unlike
    # app/main/views.py which sends the bare number -- confirm which is right
    route_url = 'http://wxbus.gzyyjt.net/wei-bus-app/route/getByName'
    route_payload = {'name':busnum}
    s = requests.Session()
    res = s.get(verify_url, headers=headers)
    route = s.post(route_url, data=route_payload, headers=headers)
    route_id = route.json()[0]['i']
    # NOTE(review): direction is hard-coded to '/0' here even though the
    # route accepts a <direction> parameter -- likely a bug
    run_bus_url = 'http://wxbus.gzyyjt.net/wei-bus-app/runBus/getByRouteAndDirection/'+route_id+'/0'
    run_bus_res = s.get(run_bus_url, headers=headers)
    runbus = run_bus_res.json()
    return render_template('runbus.html', busnum=busnum, buslist=buslist, runbus=runbus)
if __name__ == '__main__':
manager.run() | {"/app/main/forms.py": ["/app/models.py"]} |
54,372 | yohoc/busstation | refs/heads/master | /config.py | # -*- coding: utf-8 -*-
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base Flask configuration shared by all environments."""
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    @staticmethod
    def init_app(app):
        # hook for subclasses/environments to customise the app; no-op here
        pass
class DevelopmentConfig(Config):
    """Development settings: debug on, local businfo-dev.db SQLite file."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URI') or \
        'sqlite:///' + os.path.join(basedir,'businfo-dev.db')
class ProductionConfig(Config):
    """Production settings: local businfo.db SQLite file.

    NOTE(review): reads the same TEST_DATABASE_URI env var as the dev
    config -- production probably wants its own DATABASE_URI; confirm.
    """
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URI') or \
        'sqlite:///' + os.path.join(basedir,'businfo.db')
# name -> config class mapping used by the app factory; 'default' is dev
config = {
    'development': DevelopmentConfig,
    'production' : ProductionConfig,
    'default': DevelopmentConfig
}
| {"/app/main/forms.py": ["/app/models.py"]} |
54,373 | yohoc/busstation | refs/heads/master | /app/models.py | # -*- coding: utf-8 -*-
from . import db
# 定义User对象:
class Bus(db.Model):
# 表的名字:
__tablename__ = 'businfo'
# 表的结构:
id = db.Column(db.Integer, primary_key=True)
bus_num = db.Column(db.String(20))
route_id = db.Column(db.String(5))
direction = db.Column(db.String(1))
bus = db.Column(db.String(2000))
def __repr__(self):
return '<Bus %r>' % self.bus_num | {"/app/main/forms.py": ["/app/models.py"]} |
54,374 | yohoc/busstation | refs/heads/master | /app/main/views.py | # -*- coding: utf-8 -*-
from flask import Flask, render_template, redirect, url_for, flash ,request
from flask_bootstrap import Bootstrap
from forms import BusNum
from flask_sqlalchemy import SQLAlchemy
from wtforms import StringField, SubmitField
from wtforms.validators import Required
import requests
from . import main
from ..models import Bus
import re
@main.route('/', methods=['GET', 'POST'])
def index():
    """Bus lookup form; a valid submit redirects to the direction-0 status
    page for the upper-cased bus number."""
    form = BusNum()
    if form.validate_on_submit():
        # defensive check (Required() should already prevent empty data);
        # use the `is None` identity idiom instead of `== None`
        if form.num.data is None:
            flash('请输入车辆信息')
        num = form.num.data.upper()
        return redirect(url_for('main.runbus_status', busnum=num, direction='0'))
    return render_template('index.html', form=form)
@main.route('/runbus/<busnum>/<direction>')
def runbus_status(busnum, direction):
    """Render the stop list and live running-bus info for a route/direction.

    Stop names come from the local DB (route names stored with a u'路'
    suffix); live positions are fetched from the WeChat bus web app.
    """
    busnum_s = busnum + u'路'
    status = Bus.query.filter_by(bus_num=busnum_s, direction=direction).first().bus
    # the stop list is stored as a stringified Python list; un-stringify it
    buslist = status.replace('[','').replace(']','').replace('\'','').split(',')
    # headers mimicking the WeChat in-app browser (with session cookies)
    headers = {
        'Host': 'wxbus.gzyyjt.net',
        'Upgrade-Insecure-Requests': '1',
        'Connection': 'keep-alive',
        'Accept-Encoding': 'gzip, deflate',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Cookie': 'JSESSIONID=8F70E1A8D269CD7BD1925AC6E4173DFF; route=fe9b13b33d88398957ee445b97555283; gzhUser=gh_342e92a92760; openId=ouz9Msz1ISCr3XuDLljot-DDwFbo; realOpenId=ouz9Msz1ISCr3XuDLljot-DDwFbo; WBSRV=s3',
        'Accept-Language': 'zh-cn',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_1_1 like Mac OS X) AppleWebKit/602.2.14 (KHTML, like Gecko) Mobile/14B100 MicroMessenger/6.5.1 NetType/WIFI Language/zh_CN'
    }
    # identity-verification URL (authenticates the session)
    verify_url = 'http://wxbus.gzyyjt.net/wei-bus-app/route?nickName=&gzhUser=gh_342e92a92760&openId=ouz9Msz1ISCr3XuDLljot-DDwFbo'
    # URL that resolves a bus-line name to its routeid
    route_url = 'http://wxbus.gzyyjt.net/wei-bus-app/route/getByName'
    route_payload = {'name':busnum}
    s = requests.Session()
    res = s.get(verify_url, headers=headers)
    route = s.post(route_url, data=route_payload, headers=headers)
    route_id = route.json()[0]['i']
    run_bus_url = 'http://wxbus.gzyyjt.net/wei-bus-app/runBus/getByRouteAndDirection/'+route_id+ '/' + direction
    run_bus_res = s.get(run_bus_url, headers=headers)
    runbus = run_bus_res.json()
    return render_template('runbus.html', busnum=busnum, buslist=buslist, runbus=runbus, direction=direction)
@main.route('/runbus/reverse')
def bus_reverse():
    """Redirect to the opposite direction of a runbus status page.

    Fixes vs. the original:
      * url_for must receive an endpoint name plus keyword arguments; the
        original passed it the *return value* of calling the view function.
      * re.findall returns a list of matches; take the first match instead
        of passing the list along as the bus number.
      * fall through to the index instead of returning None (a 500 in Flask)
        when no direction can be determined.
    NOTE(review): request.path for this route is always the static
    '/runbus/reverse', so the parsed busnum/direction look wrong by design;
    confirm whether this route was meant to take busnum/direction params.
    """
    url = str(request.path)
    direction = url[-1:]
    matches = re.findall('/runbus/(.*)/', url)
    busnum = matches[0] if matches else ''
    if direction == '0':
        return redirect(url_for('main.runbus_status', busnum=busnum, direction='1'))
    if direction == '1':
        return redirect(url_for('main.runbus_status', busnum=busnum, direction='0'))
    return redirect(url_for('main.index'))
| {"/app/main/forms.py": ["/app/models.py"]} |
54,390 | erickstm87/movie_trailers | refs/heads/master | /entertainment_center.py | #!/usr/bin/python
import fresh_tomatoes
import media
# One Movie per film: title, tagline, poster image URL, YouTube trailer URL
Dial_M = media.Movie("Dial M for Murder", "To pull off the perfect murder", "http://www.homevideos.com/movies-covers/dialm.jpg", "https://www.youtube.com/watch?v=JWP_hrNHSN4")
Fight_Club = media.Movie("Fight Club", "Please Don't Discuss It!",'http://ia.media-imdb.com/images/M/MV5BMjIwNTYzMzE1M15BMl5BanBnXkFtZTcwOTE5Mzg3OA@@._V1_UY1200_CR88,0,630,1200_AL_.jpg','https://www.youtube.com/watch?v=SUXWAEX2jlg')
Zombieland = media.Movie("Zombieland", "A riveting tale of zombies and his struggle with women","http://www.sonypictures.com/movies/zombieland/assets/images/onesheet.jpg","https://www.youtube.com/watch?v=8m9EVP8X7N8")
O_Brother = media.Movie('O Brother Where Art Thou', 'A mans return to his home', 'https://upload.wikimedia.org/wikipedia/en/5/5b/O_brother_where_art_thou_ver1.jpg', 'https://www.youtube.com/watch?v=eW9Xo2HtlJI')
Midnight_Paris = media.Movie('Midnight in Paris', 'The Lost Generation comes to life', 'https://upload.wikimedia.org/wikipedia/en/9/9f/Midnight_in_Paris_Poster.jpg', 'https://www.youtube.com/watch?v=FAfR8omt-CY')
No_Country = media.Movie('No Country for Old Men','Brutality','http://baldmove.com/wp-content/uploads/2015/09/no-country-for-old-men.jpg','https://www.youtube.com/watch?v=38A__WT3-o0')
#print (Zombieland.trailer_youtube_url)
#O_Brother.show_trailer()
# render the static trailer page for all movies and open it in a browser
movies = [Dial_M, Fight_Club, Zombieland, O_Brother, Midnight_Paris, No_Country]
fresh_tomatoes.open_movies_page(movies)
#print (media.Movie.__doc__)
| {"/entertainment_center.py": ["/media.py"]} |
54,391 | erickstm87/movie_trailers | refs/heads/master | /media.py | #!/usr/bin/python
import webbrowser
class Movie():
    """Holds movie metadata and can open its trailer or poster in a browser."""

    # legal rating values (not used by the class itself)
    valid_ratings = ['Best', 'Better', 'Incredible']

    def __init__(self, movie_title, movie_storyline, poster_image_url, trailer_youtube_url):
        """Store the title, storyline, poster URL and trailer URL."""
        self.title, self.storyline = movie_title, movie_storyline
        self.poster_image_url = poster_image_url
        self.trailer_youtube_url = trailer_youtube_url

    def show_trailer(self):
        """Open the YouTube trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)

    def open_poster(self):
        """Open the poster image in the default web browser."""
        webbrowser.open(self.poster_image_url)
| {"/entertainment_center.py": ["/media.py"]} |
54,393 | pulinau/sumo_rl_driving | refs/heads/main | /github-release/action.py | #!python3
__author__ = "Changjian Li, Aman Jhunjhunwala"
from include import *
def get_action_space():
  """Build the gym action space: a discrete lane-change choice plus a
  discrete acceleration-level choice."""
  lane = spaces.Discrete(len(ActionLaneChange))
  accel = spaces.Discrete(len(ActionAccel))
  return spaces.Dict({"lane_change": lane, "accel_level": accel})
def disable_collision_check(env, veh_id):
  """Clear all SUMO speed and lane-change safety-check bits for veh_id so
  the agent has raw low-level control (all-zero bitmasks disable checks)."""
  print("disabled")
  env.tc.vehicle.setSpeedMode(veh_id, 0b00000)
  env.tc.vehicle.setLaneChangeMode(veh_id, 0b0000000000)
def enable_collision_check(env, veh_id):
  """Restore SUMO's default speed and lane-change safety-check bitmasks for
  veh_id (the inverse of disable_collision_check)."""
  print("enabled")
  env.tc.vehicle.setSpeedMode(veh_id, 0b11111)
  env.tc.vehicle.setLaneChangeMode(veh_id, 0b011001010101)
def is_illegal_action(env, veh_id, action_dict):
  """Return True if the action would break the simulation (e.g. raise a
  TraCI exception), such as changing lanes off the edge of the road.
  """
  # couldChangeLane has a time lag of one step, a workaround is needed until this is fixed
  #if (action_dict["lane_change"] == 1 and env.tc.vehicle.couldChangeLane(veh_id, 1) == False) or \
  #(action_dict["lane_change"] == 2 and env.tc.vehicle.couldChangeLane(veh_id, -1) == False):
  # workaround: a left change from the leftmost lane or a right change from
  # lane 0 is treated as illegal
  num_lanes_veh_edge = env.tc.edge.getLaneNumber(env.tc.vehicle.getRoadID(veh_id))
  if (action_dict["lane_change"] == ActionLaneChange.LEFT and env.tc.vehicle.getLaneIndex(veh_id) == num_lanes_veh_edge - 1) or \
  (action_dict["lane_change"] == ActionLaneChange.RIGHT and env.tc.vehicle.getLaneIndex(veh_id) == 0):
    return True
  return False
def is_invalid_action(env, veh_id, action_dict):
  """Return True for actions that don't make sense and should be treated
  as a noop. Currently a placeholder that always returns False.
  """
  return False
def inc_speed(speed, inc, max_speed):
  """Return speed increased by inc, clamped from above at max_speed."""
  return min(speed + inc, max_speed)
def dec_speed(speed, dec, min_speed):
  """Return speed decreased by dec, clamped from below at min_speed."""
  return max(speed - dec, min_speed)
def act(env, veh_id, action_dict):
  """ Apply action_dict ({"lane_change": ..., "accel_level": ...}) to veh_id
      and advance the simulation one step.

      Returns EnvState.DONE if the vehicle has left the scene,
      EnvState.CRASH for an illegal action or a collision involving veh_id,
      and EnvState.NORMAL otherwise.
  """
  if veh_id not in env.tc.vehicle.getIDList():
    return EnvState.DONE
  # An illegal action is considered as causing a collision
  if is_illegal_action(env, veh_id, action_dict):
    return EnvState.CRASH
  # action set to noop if it's invalid
  if is_invalid_action(env, veh_id, action_dict):
    action_dict = {"lane_change": ActionLaneChange.NOOP, "accel_level": ActionAccel.NOOP}
  # if car is controlled by RL agent
  if env.agt_ctrl == True:
    # Lane Change (duration just under one SUMO step so it completes in-step)
    if action_dict["lane_change"] == ActionLaneChange.LEFT:
      env.tc.vehicle.changeLane(veh_id, env.tc.vehicle.getLaneIndex(veh_id) + 1, int(env.SUMO_TIME_STEP * 1000)-1)
    elif action_dict["lane_change"] == ActionLaneChange.RIGHT:
      env.tc.vehicle.changeLane(veh_id, env.tc.vehicle.getLaneIndex(veh_id) - 1, int(env.SUMO_TIME_STEP * 1000)-1)
    else:
      pass
    ego_speed = env.tc.vehicle.getSpeed(veh_id)
    ego_max_speed = min(env.tc.vehicle.getAllowedSpeed(veh_id), env.MAX_VEH_SPEED)
    ego_max_accel = min(env.tc.vehicle.getAccel(veh_id), env.MAX_VEH_ACCEL)
    ego_max_decel = min(env.tc.vehicle.getDecel(veh_id), env.MAX_VEH_DECEL)
    # Accelerate/Decelerate: map the discrete level to a fraction of the
    # max accel/decel capability over one time step, clamped to [0, max]
    accel_level = action_dict["accel_level"]
    if accel_level.value > ActionAccel.NOOP.value:
      ego_next_speed = inc_speed(ego_speed, (accel_level.value - ActionAccel.NOOP.value)/len(ActionAccel) * ego_max_accel * env.SUMO_TIME_STEP, ego_max_speed)
    elif accel_level.value < ActionAccel.NOOP.value:
      ego_next_speed = dec_speed(ego_speed, (-accel_level.value + ActionAccel.NOOP.value)/len(ActionAccel) * ego_max_decel * env.SUMO_TIME_STEP, 0)
    else:
      # if car is controlled by RL agent, then ActionAccel.NOOP maintains the current speed
      ego_next_speed = ego_speed
    env.tc.vehicle.slowDown(veh_id, ego_next_speed, int(env.SUMO_TIME_STEP * 1000)-1)
    # Turn not implemented
  env.tc.simulationStep()
  if env.tc.simulation.getCollidingVehiclesNumber() > 0:
    if veh_id in env.tc.simulation.getCollidingVehiclesIDList():
      return EnvState.CRASH
  # if the subject vehicle goes out of scene, set env.env_state to EnvState.DONE
  if veh_id not in env.tc.vehicle.getIDList():
    return EnvState.DONE
  return EnvState.NORMAL
def infer_action(env):
  """When the ego vehicle is controlled by SUMO, infer the (lane_change,
  accel_level) action it effectively took during the last step by comparing
  the two most recent vehicle-state snapshots in env.veh_dict_hist.

  Returns {"lane_change": ActionLaneChange, "accel_level": ActionAccel}.
  """
  veh_dict = env.veh_dict_hist.get(-2)
  new_veh_dict = env.veh_dict_hist.get(-1)
  # lane change: same edge and lane index +/-1 means a left/right change
  if new_veh_dict[env.EGO_VEH_ID]["edge_id"] == veh_dict[env.EGO_VEH_ID]["edge_id"]:
    delta_lane = new_veh_dict[env.EGO_VEH_ID]["lane_index"] - veh_dict[env.EGO_VEH_ID]["lane_index"]
    if delta_lane == 1:
      lane_change = ActionLaneChange.LEFT.value
    elif delta_lane == -1:
      lane_change = ActionLaneChange.RIGHT.value
    else:
      lane_change = ActionLaneChange.NOOP.value
  else:
    lane_change = ActionLaneChange.NOOP.value
  ego_max_accel = min(env.tc.vehicle.getAccel(env.EGO_VEH_ID), env.MAX_VEH_ACCEL)
  ego_max_decel = min(env.tc.vehicle.getDecel(env.EGO_VEH_ID), env.MAX_VEH_DECEL)
  # observed acceleration over the last step
  accel = (new_veh_dict[env.EGO_VEH_ID]["speed"] - veh_dict[env.EGO_VEH_ID]["speed"])/env.SUMO_TIME_STEP
  if accel >= 0:
    # quantize against the max acceleration capability
    ratio = accel/ego_max_accel
    if ratio <= 1/6:
      accel_level = ActionAccel.NOOP.value
    elif ratio <= 1/2:
      accel_level = ActionAccel.MINACCEL.value
    elif ratio <= 5/6:
      accel_level = ActionAccel.MEDACCEL.value
    else:
      accel_level = ActionAccel.MAXACCEL.value
  else:
    # bug fix: the original mixed ego_max_accel into the MINDECEL/MEDDECEL
    # thresholds; every deceleration ratio must use ego_max_decel
    ratio = -accel/ego_max_decel
    if ratio <= 1/6:
      accel_level = ActionAccel.NOOP.value
    elif ratio <= 1/2:
      accel_level = ActionAccel.MINDECEL.value
    elif ratio <= 5/6:
      accel_level = ActionAccel.MEDDECEL.value
    else:
      accel_level = ActionAccel.MAXDECEL.value
  return {"lane_change": ActionLaneChange(lane_change), "accel_level": ActionAccel(accel_level)}
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,394 | pulinau/sumo_rl_driving | refs/heads/main | /main.py | #!python3
__author__ = "Changjian Li"
import argparse
from sumo_gym import *
from dqn import *
import random
import multiprocessing as mp
from sumo_cfgs import sumo_cfg
from dqn_cfgs import cfg_validity, cfg_safety, cfg_regulation, cfg_speed_comfort
from workers import run_env, run_QAgent
if __name__ == "__main__":
  # Command-line switches: --play (evaluation, fewer episodes), --resume
  # (reload saved models), --version (tag attached to the model files).
  parser = argparse.ArgumentParser()
  parser.add_argument("--play")
  parser.add_argument("--resume")
  parser.add_argument("--version")
  args = parser.parse_args()
  env = MultiObjSumoEnv(sumo_cfg)
  max_ep = 50000
  sim_inst = 10  # number of parallel simulator processes
  dqn_cfg_list = [cfg_validity, cfg_safety, cfg_regulation, cfg_speed_comfort]
  if args.resume:
    for dqn_cfg in dqn_cfg_list:
      dqn_cfg.resume = True
  if args.play:
    for dqn_cfg in dqn_cfg_list:
      dqn_cfg.play = True
    max_ep = 10
    sim_inst = 10
  if args.version:
    for dqn_cfg in dqn_cfg_list:
      dqn_cfg.version = args.version
  # Disabled pretrain-example loading, kept for reference as a string literal.
  """
  if args.play != True:
    with open("examples.npz", "rb") as file:
      npzfile = np.load(file)
      mem = npzfile[npzfile.files[0]]
    pretrain_traj_list = [[(obs_dict, action, None, None, True)] for traj in mem for obs_dict, action in traj]
    mem = None
    npzfile = None
  """
  pretrain_traj_list = []
  # One bounded queue per (agent, simulator) pair: observations flow to the
  # agents, chosen action sets flow back, finished trajectories go to training.
  obs_queues = [[mp.Queue(maxsize=5) for j in range(sim_inst)] for i in range(len(dqn_cfg_list))]
  action_queues = [[mp.Queue(maxsize=5) for j in range(sim_inst)] for i in range(len(dqn_cfg_list))]
  traj_queues = [[mp.Queue(maxsize=5) for j in range(sim_inst)] for i in range(len(dqn_cfg_list))]
  try:
    # one process per simulator instance
    env_list = [mp.Process(target=run_env,
                           name='sumo' + str(i),
                           args=(sumo_cfg,
                                 dqn_cfg_list,
                                 [obs_q[i] for obs_q in obs_queues],
                                 [action_q[i] for action_q in action_queues],
                                 [traj_q[i] for traj_q in traj_queues],
                                 args.play, max_ep, i,))
                for i in range(sim_inst)]
    # one process per DQN objective; the last zip argument assigns
    # CUDA_VISIBLE_DEVICES per agent ('' = CPU only)
    agt_list = [mp.Process(target=run_QAgent,
                           name='dqn ' + dqn_cfg.name,
                           args=(sumo_cfg, dqn_cfg, pretrain_traj_list, obs_q_list, action_q_list, traj_q_list,
                                 cuda_vis_devs))
                for dqn_cfg, obs_q_list, action_q_list, traj_q_list, cuda_vis_devs in
                zip(dqn_cfg_list, obs_queues, action_queues, traj_queues, ['', '0', '0', ''])]
    [p.start() for p in env_list]
    [p.start() for p in agt_list]
    # tear everything down as soon as any agent process dies
    while any([p.is_alive() for p in env_list]):
      if any([not p.is_alive() for p in agt_list]):
        [p.terminate() for p in env_list]
        [p.terminate() for p in agt_list]
        print("dqn agt terminated")
        break
    [p.join() for p in env_list]
    [p.terminate() for p in agt_list]
  except:
    raise
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,395 | pulinau/sumo_rl_driving | refs/heads/main | /github-release/reward.py | #!/bin/python3
__author__ = "Changjian Li"
from include import *
def get_reward_list(env):
  """Evaluate every reward component for the current step.

  Order matters: [safety, regulation, comfort, speed] matches the agent list.
  """
  return [
    get_reward_safety(env),
    get_reward_regulation(env),
    get_reward_comfort(env),
    get_reward_speed(env),
  ]
def get_reward_safety(env):
  """Safety reward: -1 when this step ended in a collision, otherwise 0."""
  return -1 if env.env_state == EnvState.CRASH else 0
def get_reward_regulation(env):
  """Traffic-rule reward: -1 for a rule violation this step, 0 otherwise.

  Two violations are checked: (a) approaching the end of the lane without
  being on a lane that continues along the route, and (b) failing to yield
  to a priority vehicle.
  """
  obs_dict = env.obs_dict_hist.get(-1)
  veh_dict = env.veh_dict_hist.get(-1)  # NOTE(review): unused in this function
  # (a) within 0.5 (presumably meters — TODO confirm units) of the lane end,
  # the ego must already be on a correct lane (gap == 0)
  if obs_dict["ego_dist_to_end_of_lane"] < 0.5:
    if obs_dict["ego_correct_lane_gap"] != 0:
      return -1
  # (b) a priority vehicle on a peer/conflicting lane is close to the
  # junction (within 10s of travel at its speed) while the ego keeps moving
  for i in range(env.NUM_VEH_CONSIDERED):
    if obs_dict["exists_vehicle"][i] == 1 and \
       obs_dict["has_priority"][i] == 1 and \
       (obs_dict["veh_relation_peer"][i] == 1 or obs_dict["veh_relation_conflict"][i] == 1) and \
       obs_dict["dist_to_end_of_lane"][i] < 10 * obs_dict["speed"][i] and \
       obs_dict["ego_speed"] > 0.2:
      return -1
  return 0
def get_reward_comfort(env):
  """Comfort reward: penalize lane changes/crashes and harsh (de)acceleration.

  Returns a value in [-1, 0]: -0.5 for a lane change or crash plus up to
  -0.5 proportional to how far the acceleration exceeds the comfort limit.
  """
  r = 0
  if env.veh_dict_hist.size < 2:
    # two snapshots are needed to detect a lane change / measure acceleration
    return r
  # -0.5 for a lane change on the same edge (lane_id changed, edge_id did
  # not), or for a crash
  if (env.veh_dict_hist.get(-1)[env.EGO_VEH_ID]["edge_id"] == env.veh_dict_hist.get(-2)[env.EGO_VEH_ID]["edge_id"] and
      env.veh_dict_hist.get(-1)[env.EGO_VEH_ID]["lane_id"] != env.veh_dict_hist.get(-2)[env.EGO_VEH_ID]["lane_id"]
     ) or env.env_state == EnvState.CRASH:
    r += -0.5
  ego_max_accel = min(env.tc.vehicle.getAccel(env.EGO_VEH_ID), env.MAX_VEH_ACCEL)
  ego_max_decel = min(env.tc.vehicle.getDecel(env.EGO_VEH_ID), env.MAX_VEH_DECEL)
  # finite-difference acceleration of the ego over the last step
  accel = (env.veh_dict_hist.get(-1)[env.EGO_VEH_ID]["speed"] - env.veh_dict_hist.get(-2)[env.EGO_VEH_ID]["speed"])/env.SUMO_TIME_STEP
  # linear penalty for exceeding the comfort limit, normalized by the
  # headroom between the comfort limit and the physical capability
  if accel > 0 and abs(accel) > env.MAX_COMFORT_ACCEL:
    r += -0.5 * (abs(accel) - env.MAX_COMFORT_ACCEL)/(ego_max_accel - env.MAX_COMFORT_ACCEL)
  elif accel < 0 and abs(accel) > env.MAX_COMFORT_DECEL:
    r += -0.5 * (abs(accel) - env.MAX_COMFORT_DECEL)/(ego_max_decel - env.MAX_COMFORT_DECEL)
  return r
def get_reward_speed(env):
  """Speed reward: -1 when the ego is below the allowed speed yet did not
  accelerate this step, 0 otherwise."""
  ego_max_speed = min(env.tc.vehicle.getAllowedSpeed(env.EGO_VEH_ID), env.MAX_VEH_SPEED)
  below_limit = env.veh_dict_hist.get(-1)[env.EGO_VEH_ID]["speed"] < ego_max_speed
  not_accelerating = env.action_dict_hist.get(-1)["accel_level"].value <= ActionAccel.NOOP.value
  if below_limit and not_accelerating:
    return -1
  return 0
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,396 | pulinau/sumo_rl_driving | refs/heads/main | /sumo_cfgs.py | #!python3
__author__ = "Changjian Li"
from include import *
from sumo_gym import SumoCfg
# --------------------------
# SUMO
# --------------------------
SUMO_BIN = "/home/ken/project/sumo-bin/bin/sumo"
SUMO_TIME_STEP = 0.1  # seconds of simulated time per step
# "--net-file" and "route_file"
NET_XML_FILE = "/home/ken/project/sumo-rl/sumo_openai_gym/traffic/test.net.xml"
# 4000 pre-generated route files (see traffic/generate.py)
ROU_XML_FILE_LIST = ["/home/ken/project/sumo-rl/sumo_openai_gym/traffic/test" + str(i) + ".rou.xml" for i in range(4000)]
#ROU_XML_FILE_LIST = ["/home/ken/project/sumo-rl/sumo_openai_gym/traffic/test" + str(i) + ".rou.xml" for i in range(1)]
SUMO_CMD = [SUMO_BIN,
            "--no-warnings", "true",
            "--time-to-teleport", "-1",
            "--collision.action", "warn",
            "--collision.mingap-factor", "0",
            "--collision.check-junctions", "true",
            "--xml-validation", "never",
            "--step-length", str(SUMO_TIME_STEP),
            "-n", NET_XML_FILE,
            #"--lanechange.duration", "2",
            "-r"]  # NOTE(review): "-r" left open — route file presumably appended at reset; verify in sumo_gym
EGO_VEH_ID = "ego"
MAX_VEH_ACCEL = 2.6   # presumably m/s^2 — TODO confirm units
MAX_VEH_DECEL = 4.5
MAX_VEH_SPEED = 14
# --------------------------
# observation
# --------------------------
NUM_LANE_CONSIDERED = 1 # number of lanes considered on each side of ego
NUM_VEH_CONSIDERED = 32
MAX_TTC_CONSIDERED = 60
OBSERVATION_RADIUS = 100
# --------------------------
# reward
# --------------------------
MAX_COMFORT_ACCEL_LEVEL = ActionAccel.MINACCEL
MAX_COMFORT_DECEL_LEVEL = ActionAccel.MINDECEL
DEFAULT_COLOR = (255, 255, 0)
YIELD_COLOR = (255, 180, 0)
# shared configuration instance imported throughout the project
sumo_cfg = SumoCfg(
  # sumo
  SUMO_CMD,
  SUMO_TIME_STEP,
  NET_XML_FILE,
  ROU_XML_FILE_LIST,
  EGO_VEH_ID,
  MAX_VEH_ACCEL,
  MAX_VEH_DECEL,
  MAX_VEH_SPEED,
  # observation
  NUM_LANE_CONSIDERED,
  NUM_VEH_CONSIDERED,
  MAX_TTC_CONSIDERED,
  OBSERVATION_RADIUS,
  # reward
  MAX_COMFORT_ACCEL_LEVEL,
  MAX_COMFORT_DECEL_LEVEL,
  DEFAULT_COLOR,
  YIELD_COLOR)
54,397 | pulinau/sumo_rl_driving | refs/heads/main | /github-release/sumo_gym.py | #!python3
__author__ = "Changjian Li"
from copy import deepcopy
import random
from action import get_action_space, disable_collision_check, enable_collision_check, act, infer_action
from observation import get_observation_space, get_veh_dict, get_obs_dict
from reward import get_reward_list
from utils import class_vars
from include import *
class History:
  """Fixed-capacity sliding window over the most recent items.

  get(-1) returns the latest entry, get(-2) the one before it, etc.
  Entries are deep-copied on retrieval so callers cannot mutate the store.
  """
  def __init__(self, length):
    self._history = []   # oldest -> newest
    self.size = 0        # current number of stored items (<= length)
    self.length = length # capacity
  def add(self, current):
    if self.size >= self.length:
      # BUG FIX: the original dropped the NEWEST entry (self._history[:-1]),
      # so once full, get(-2) returned the first-ever snapshot instead of
      # the previous time step. Drop the OLDEST entry to keep a true
      # sliding window of the last `length` items.
      self._history = self._history[1:]
    else:
      self.size += 1
    self._history += [current]
  def reset(self):
    self._history = []
    self.size = 0
  def get(self, index):
    # deep copy so the stored snapshot stays immutable to the caller
    return deepcopy(self._history[index])
class SumoCfg():
  """Plain configuration container for the SUMO gym environment.

  Every constructor argument is stored verbatim as a same-named attribute;
  SumoGymEnv later copies these attributes onto itself.
  """
  def __init__(self,
               # sumo
               SUMO_CMD,
               SUMO_TIME_STEP,
               NET_XML_FILE,
               EGO_VEH_ID,
               MAX_VEH_ACCEL,
               MAX_VEH_DECEL,
               MAX_VEH_SPEED,
               # observation
               NUM_LANE_CONSIDERED,
               NUM_VEH_CONSIDERED,
               OBSERVATION_RADIUS,
               # reward
               MAX_COMFORT_ACCEL,
               MAX_COMFORT_DECEL):
    # mirror every argument onto the instance under its own name
    for _name, _value in locals().items():
      if _name != "self":
        setattr(self, _name, _value)
class SumoGymEnv(gym.Env):
  """SUMO environment.

  Wraps a TraCI connection to a SUMO simulation as a gym.Env. All fields of
  the SumoCfg passed in are copied onto the instance. The `step` here is a
  placeholder; the real stepping logic lives in MultiObjSumoEnv.
  """
  def __init__(self, config):
    # copy every configuration field (SUMO_CMD, EGO_VEH_ID, ...) onto self
    _attrs = class_vars(config)
    for _attr in _attrs:
      setattr(self, _attr, getattr(config, _attr))
    self.action_space = get_action_space()
    # NOTE(review): attribute name is misspelled ("obsevation_space") — kept
    # as-is because external code may reference it
    self.obsevation_space = get_observation_space(self)
    self.env_state = EnvState.NOT_STARTED
    self._agt_ctrl = False # if the ego car is controlled by RL agent
    # two-slot histories: get(-1) is the current step, get(-2) the previous
    self.veh_dict_hist = History(2)
    self.obs_dict_hist = History(2)
    self.action_dict_hist = History(2)
    try:
      # random label so several simulations can run in parallel processes
      sim_label = "sim" + str(random.randint(0, 65536))
      traci.start(self.SUMO_CMD, label = sim_label)
      self.tc = traci.getConnection(sim_label)
    except (traci.FatalTraCIError, traci.TraCIException):
      self.env_state = EnvState.ERROR
      raise
  @property
  def agt_ctrl(self):
    # True when the RL agent (not SUMO's driver model) controls the ego car
    return self._agt_ctrl
  @agt_ctrl.setter
  def agt_ctrl(self, value):
    # toggling control also toggles SUMO's built-in collision checking for
    # the ego vehicle (disabled while the agent drives)
    if value == True:
      disable_collision_check(self, self.EGO_VEH_ID)
      self._agt_ctrl = value
    elif value == False:
      enable_collision_check(self, self.EGO_VEH_ID)
      self._agt_ctrl = value
    else:
      raise ValueError("SumoGymEnv.agt_ctrl must be either True or False")
  def step(self, action):
    # placeholder to satisfy the gym.Env interface; always returns zeros
    obs = 0
    done = 0
    reward = 0
    info = None
    return obs, reward, done, info
  def reset(self):
    """Reload the simulation and return the initial observation dict."""
    self.action_dict_hist.reset()
    self.veh_dict_hist.reset()
    self.obs_dict_hist.reset()
    try:
      self.tc.load(self.SUMO_CMD[1:])
      # 1st time step starts the simulation,
      # 2nd makes sure that all initial vehicles (departure time < SUMO_TIME_STEP) are in scene
      self.tc.simulationStep()
      self.tc.simulationStep()
      self.veh_dict_hist.add(get_veh_dict(self))
      self.obs_dict_hist.add(get_obs_dict(self))
      self.agt_ctrl = True
      self.env_state = EnvState.NORMAL
      return get_obs_dict(self)
    except (traci.FatalTraCIError, traci.TraCIException):
      self.env_state = EnvState.ERROR
      raise
  def close(self):
    """Shut down the TraCI connection."""
    try:
      self.tc.close()
    except (traci.FatalTraCIError, traci.TraCIException):
      self.env_state = EnvState.ERROR
      raise
class MultiObjSumoEnv(SumoGymEnv):
  """SumoGymEnv variant returning a list of per-objective rewards."""
  def step(self, action_dict):
    """Apply {"lane_change": ..., "accel_level": ...} for one simulation step.

    Returns (obs_dict, reward_list, env_state, info) where info is the
    action actually executed — inferred when SUMO controls the ego car.
    """
    assert self.env_state == EnvState.NORMAL, "env.env_state is not EnvState.NORMAL"
    try:
      self.env_state = act(self, self.EGO_VEH_ID, action_dict)
      if self.env_state == EnvState.DONE:
        # ego left the scene: no fresh observation exists, reuse the last one
        obs_dict = self.obs_dict_hist.get(-1)
        veh_dict = self.veh_dict_hist.get(-1)
      else:
        obs_dict = get_obs_dict(self)
        veh_dict = get_veh_dict(self)
      if self.agt_ctrl == False:
        # SUMO drove this step: recover the action it effectively took
        action_dict = infer_action(self)
      self.action_dict_hist.add(action_dict)
      self.veh_dict_hist.add(veh_dict)
      self.obs_dict_hist.add(obs_dict)
    except (traci.FatalTraCIError, traci.TraCIException):
      self.env_state = EnvState.ERROR
      raise
    info = action_dict
    return obs_dict, get_reward_list(self), self.env_state, info
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,398 | pulinau/sumo_rl_driving | refs/heads/main | /examples.py | #!python3
__author__ = "Changjian Li"
from sumo_cfgs import sumo_cfg
from sumo_gym import *
import multiprocessing as mp
import random
def run_env(sumo_cfg, max_ep):
  """Run max_ep episodes with SUMO driving and record example trajectories.

  Returns a list of trajectories, one per episode; each trajectory is a list
  of (obs_dict, flat_action_index) pairs where index = lane_change*7 + accel.
  """
  id = random.randint(0, 65536)  # NOTE(review): shadows builtin id(); kept as-is
  env = MultiObjSumoEnv(sumo_cfg)
  mem = []
  for ep in range(max_ep):
    print("env id: {}".format(id), "episode: {}/{}".format(ep, max_ep))
    obs_dict = env.reset()
    traj = []
    max_step = 2000
    for step in range(max_step):
      env.agt_ctrl = False  # SUMO's driver model controls the ego vehicle
      action = 0
      next_obs_dict, _, env_state, action_dict = env.step(
        {"lane_change": ActionLaneChange(action // 7), "accel_level": ActionAccel(action % 7)})
      # record the action SUMO actually took (inferred by the env)
      action = action_dict["lane_change"].value * 7 + action_dict["accel_level"].value
      print(action)
      traj.append((obs_dict, action))
      obs_dict = next_obs_dict
      if env_state == EnvState.DONE:
        print("Ego ", id, " drove out of scene, step: ", step)
        break
      if env_state != EnvState.NORMAL or step == max_step - 1:
        print("Ego ", id, " crashed, step: ", step)
        break
    mem.append(traj)
  env.close()
  return mem
if __name__ == "__main__":
  # collect demonstration trajectories from num_sim parallel simulators and
  # persist them as a single npz archive
  max_ep = 100
  num_sim = 10
  p = mp.Pool(num_sim)
  mem_list = p.starmap(run_env, [(sumo_cfg, max_ep)] * num_sim)
  mem = []
  for x in mem_list:
    mem += x
  with open("examples.npz", "wb+") as file:
    np.savez(file, mem)
    file.seek(0)
54,399 | pulinau/sumo_rl_driving | refs/heads/main | /github-release/main.py | #!python3
__author__ = "Changjian Li"
from action import *
from observation import *
from sumo_gym import *
import tensorflow as tf
import keras
from keras.layers import Input, Dense, Conv1D, Lambda
from keras.models import Model, Sequential
from keras.optimizers import Adam
from keras import utils
from dqn import *
import random
# --------------------------
# SUMO
# --------------------------
SUMO_TOOLS_DIR = "/home/ken/project/sumo-bin/tools"
SUMO_BIN = "/home/ken/project/sumo-bin/bin/sumo"
SUMO_CONFIG = "/home/ken/project/sumo-rl/sumo_openai_gym/traffic/test.sumocfg"
SUMO_TIME_STEP = 0.1  # seconds of simulated time per step
SUMO_CMD = [SUMO_BIN, "-c", SUMO_CONFIG,
            "--time-to-teleport", "-1",
            "--collision.action", "none",
            "--collision.check-junctions", "true",
            "--step-length", str(SUMO_TIME_STEP)]
#            "--lanechange.duration", "2"]
NET_XML_FILE = "/home/ken/project/sumo-rl/sumo_openai_gym/traffic/test.net.xml"
EGO_VEH_ID = "ego"
MAX_VEH_ACCEL = 2.6   # presumably m/s^2 — TODO confirm units
MAX_VEH_DECEL = 4.5
MAX_VEH_SPEED = 55.55
# --------------------------
# observation
# --------------------------
NUM_LANE_CONSIDERED = 3 # number of lanes considered on each side of ego
NUM_VEH_CONSIDERED = 16
OBSERVATION_RADIUS = 600
# --------------------------
# reward
# --------------------------
MAX_COMFORT_ACCEL = 2.0
MAX_COMFORT_DECEL = 2.0
def build_model_safety(sumo_cfg, dqn_cfg):
  """Build the Q-network for the safety objective.

  Two inputs: a 4-d ego feature vector and a flattened per-vehicle feature
  map (10 features per considered vehicle). The vehicle branch applies a
  stride-10 Conv1D (one filter application per vehicle), sums over vehicles,
  and is merged additively with the ego branch.
  """
  ego_in = Input(shape=(4, ))
  veh_in = Input(shape=(10*sumo_cfg.NUM_VEH_CONSIDERED, 1))
  ego_branch = Dense(16, activation=None)(ego_in)
  veh_branch = Conv1D(16, kernel_size=10,
                      strides=10, padding='valid',
                      activation=None)(veh_in)
  veh_branch = Lambda(lambda t: tf.reduce_sum(t, axis=1))(veh_branch)
  merged = keras.layers.add([ego_branch, veh_branch])
  merged = keras.layers.Activation(activation="relu")(merged)
  hidden = Dense(16, activation='sigmoid')(merged)
  hidden = Dense(16, activation='sigmoid')(hidden)
  q_values = Dense(dqn_cfg.action_size, activation='linear')(hidden)
  model = Model(inputs=[ego_in, veh_in], outputs=q_values)
  model.compile(loss='mse',
                optimizer=Adam(lr=0.001))
  return model
def reshape_safety(sumo_cfg, obs_dict):
  """reshape gym observation to keras neural network input"""
  # ego vector (4 values): normalized speed, normalized distance to end of
  # lane, and the two lane-existence flags
  out0 = np.array([], dtype = np.float32)
  out0 = np.append(out0, np.array(obs_dict["ego_speed"])/sumo_cfg.MAX_VEH_SPEED)
  out0 = np.append(out0, np.array(obs_dict["ego_dist_to_end_of_lane"])/sumo_cfg.OBSERVATION_RADIUS)
  out0 = np.append(out0, np.array(obs_dict["ego_exists_left_lane"]))
  out0 = np.append(out0, np.array(obs_dict["ego_exists_right_lane"]))
  # per-vehicle feature matrix built row-by-row as (feature, vehicle);
  # the append ORDER defines the network's input layout — do not reorder
  out1 = np.reshape(np.array([], dtype = np.float32), (0, sumo_cfg.NUM_VEH_CONSIDERED))
  out1 = np.append(out1, np.array([obs_dict["exists_vehicle"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["speed"]])/sumo_cfg.MAX_VEH_SPEED, axis=0)
  out1 = np.append(out1, np.array([obs_dict["dist_to_end_of_lane"]])/sumo_cfg.OBSERVATION_RADIUS, axis=0)
  # relative_position contributes two rows (transposed to (2, NUM_VEH))
  out1 = np.append(out1, np.array(obs_dict["relative_position"]).T/sumo_cfg.OBSERVATION_RADIUS, axis=0)
  out1 = np.append(out1, np.array([obs_dict["relative_heading"]])/pi, axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_left"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_right"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_ahead"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_behind"]]), axis=0)
  # batch of 1: ego vector (1, 4) and vehicle map (1, 10*NUM_VEH, 1)
  # matching build_model_safety's stride-10 Conv1D input
  return [np.reshape(out0, (1,) + out0.shape), np.reshape(out1.T, (1, -1, 1))]
def build_model_regulation(sumo_cfg, dqn_cfg):
  """Build the Q-network for the regulation objective.

  Same two-branch shape as the safety model, but with 16 features per
  vehicle and a wider (24-unit) merge layer.
  """
  ego_in = Input(shape=(6 + 2*sumo_cfg.NUM_LANE_CONSIDERED, ))
  veh_in = Input(shape=(16*sumo_cfg.NUM_VEH_CONSIDERED, 1))
  ego_branch = Dense(24, activation=None)(ego_in)
  veh_branch = Conv1D(24, kernel_size=16, strides=16, padding='valid',
                      activation=None)(veh_in)
  veh_branch = Lambda(lambda t: tf.reduce_sum(t, axis=1))(veh_branch)
  merged = keras.layers.add([ego_branch, veh_branch])
  merged = keras.layers.Activation(activation="relu")(merged)
  hidden = Dense(16, activation='sigmoid')(merged)
  hidden = Dense(16, activation='sigmoid')(hidden)
  q_values = Dense(dqn_cfg.action_size, activation='linear')(hidden)
  model = Model(inputs=[ego_in, veh_in], outputs=q_values)
  model.compile(loss='mse',
                optimizer=Adam(lr=0.001))
  return model
def reshape_regulation(sumo_cfg, obs_dict):
  """Reshape the gym observation into the regulation network's two inputs."""
  # ego vector: 5 scalar features plus a one-hot lane-gap encoding of size
  # 2*NUM_LANE_CONSIDERED + 1, total 6 + 2*NUM_LANE_CONSIDERED values
  out0 = np.array([], dtype = np.float32)
  out0 = np.append(out0, np.array(obs_dict["ego_speed"])/sumo_cfg.MAX_VEH_SPEED)
  out0 = np.append(out0, np.array(obs_dict["ego_dist_to_end_of_lane"])/sumo_cfg.OBSERVATION_RADIUS)
  out0 = np.append(out0, np.array(obs_dict["ego_in_intersection"]))
  out0 = np.append(out0, np.array(obs_dict["ego_exists_left_lane"]))
  out0 = np.append(out0, np.array(obs_dict["ego_exists_right_lane"]))
  # shift the (possibly negative) lane gap to a non-negative one-hot index
  out0 = np.append(out0, utils.to_categorical(obs_dict["ego_correct_lane_gap"] + sumo_cfg.NUM_LANE_CONSIDERED,
                                              2*sumo_cfg.NUM_LANE_CONSIDERED + 1))
  # per-vehicle matrix (16 features per vehicle); append ORDER defines the
  # network's input layout — do not reorder
  out1 = np.reshape(np.array([], dtype = np.float32), (0, sumo_cfg.NUM_VEH_CONSIDERED))
  out1 = np.append(out1, np.array([obs_dict["exists_vehicle"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["speed"]])/sumo_cfg.MAX_VEH_SPEED, axis=0)
  out1 = np.append(out1, np.array([obs_dict["dist_to_end_of_lane"]])/sumo_cfg.OBSERVATION_RADIUS, axis=0)
  out1 = np.append(out1, np.array([obs_dict["in_intersection"]]), axis=0)
  # relative_position contributes two rows (transposed to (2, NUM_VEH))
  out1 = np.append(out1, np.array(obs_dict["relative_position"]).T/sumo_cfg.OBSERVATION_RADIUS, axis=0)
  out1 = np.append(out1, np.array([obs_dict["relative_heading"]])/pi, axis=0)
  out1 = np.append(out1, np.array([obs_dict["has_priority"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_peer"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_conflict"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_next"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_prev"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_left"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_right"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_ahead"]]), axis=0)
  out1 = np.append(out1, np.array([obs_dict["veh_relation_behind"]]), axis=0)
  # batch of 1: ego vector and (1, 16*NUM_VEH, 1) map for the stride-16 Conv1D
  return [np.reshape(out0, (1, -1)), np.reshape(out1.T, (1, -1, 1))]
def build_model_comfort(sumo_cfg, dqn_cfg):
  """Small MLP Q-network for the comfort objective (state is a dummy scalar)."""
  net = Sequential()
  net.add(Dense(8, input_dim=dqn_cfg.state_size, activation='sigmoid'))
  net.add(Dense(8, activation='sigmoid'))
  net.add(Dense(dqn_cfg.action_size, activation='linear'))
  net.compile(loss='mse',
              optimizer=Adam(lr=0.001))
  return net
def reshape_comfort(sumo_cfg, obs_dict):
  """The comfort agent is state-less: always feed a constant zero feature of shape (1, 1)."""
  return np.zeros((1, 1), dtype=np.float32)
def build_model_speed(sumo_cfg, dqn_cfg):
  """Small MLP Q-network for the speed objective (state is the normalized ego speed)."""
  net = Sequential()
  net.add(Dense(8, input_dim=dqn_cfg.state_size, activation='sigmoid'))
  net.add(Dense(8, activation='sigmoid'))
  net.add(Dense(dqn_cfg.action_size, activation='linear'))
  net.compile(loss='mse',
              optimizer=Adam(lr=0.001))
  return net
def reshape_speed(sumo_cfg, obs_dict):
  """Single feature: ego speed normalized by the speed cap, shaped (1, 1)."""
  normalized = np.array(obs_dict["ego_speed"]/sumo_cfg.MAX_VEH_SPEED, dtype=np.float32)
  return np.reshape(normalized, (1, -1))
# one flat action index encodes (lane_change, accel_level): index = lane*7 + accel
action_size = len(ActionLaneChange) * len(ActionAccel)
# per-objective DQN configurations; threshold is the Q-value cutoff used by
# get_action_set when pruning actions
cfg_safety = DQNCfg(name = "safety",
                    state_size = 4 + 10*NUM_VEH_CONSIDERED,
                    action_size = action_size,
                    gamma = 0.99,
                    epsilon = 0.2,
                    threshold = -0.1,
                    memory_size = 6400,
                    _build_model = build_model_safety,
                    reshape = reshape_safety)
cfg_regulation = DQNCfg(name = "regulation",
                        state_size = 6 + 2*NUM_LANE_CONSIDERED + 16*NUM_VEH_CONSIDERED,
                        action_size = action_size,
                        gamma = 0.99,
                        epsilon = 0.2,
                        threshold = -0.2,
                        memory_size = 6400,
                        _build_model = build_model_regulation,
                        reshape = reshape_regulation)
cfg_comfort = DQNCfg(name = "comfort",
                     state_size = 1,
                     action_size = action_size,
                     gamma = 0,
                     epsilon = 0.2,
                     threshold = -0.4,
                     memory_size = 640,
                     _build_model = build_model_comfort,
                     reshape = reshape_comfort)
cfg_speed = DQNCfg(name = "speed",
                   state_size = 1,
                   action_size = action_size,
                   gamma = 0,
                   epsilon = 0.1,
                   threshold = -0.4,
                   memory_size = 640,
                   _build_model = build_model_speed,
                   reshape = reshape_speed)
sumo_cfg = SumoCfg(
  # sumo
  SUMO_CMD,
  SUMO_TIME_STEP,
  NET_XML_FILE,
  EGO_VEH_ID,
  MAX_VEH_ACCEL,
  MAX_VEH_DECEL,
  MAX_VEH_SPEED,
  # observation
  NUM_LANE_CONSIDERED,
  NUM_VEH_CONSIDERED,
  OBSERVATION_RADIUS,
  # reward
  MAX_COMFORT_ACCEL,
  MAX_COMFORT_DECEL)
env = MultiObjSumoEnv(sumo_cfg)
# lexicographic multi-objective setup: agents filter the action set in order
agent_list = [DQNAgent(sumo_cfg, cfg_safety), DQNAgent(sumo_cfg, cfg_regulation), DQNAgent(sumo_cfg, cfg_comfort), DQNAgent(sumo_cfg, cfg_speed)]
# agent.load("./save/cartpole-dqn.h5")
env_state = EnvState.NORMAL
batch_size = 64
EPISODES = 10000
for e in range(EPISODES):
  obs_dict = env.reset()
  state_list = [agt.reshape(sumo_cfg, obs_dict) for agt in agent_list]
  for step in range(6400):
    print(step, env.agt_ctrl)
    # each agent removes actions it judges unacceptable, in priority order
    action_set = set(range(action_size))
    for agt, state in zip(agent_list, state_list):
      action_set = agt.get_action_set(state, action_set)
    if len(action_set) >= 1:
      action = random.sample(action_set, 1)[0]
    else:
      # NOTE(review): `action` silently keeps its previous value here
      print("*****************ERROR************************")
    next_obs_dict, reward_list, env_state, action_dict = env.step({"lane_change":ActionLaneChange(action//7), "accel_level":ActionAccel(action%7)})
    # the env may have overridden the action (SUMO control); re-encode the real one
    action = action_dict["lane_change"].value*7 + action_dict["accel_level"].value
    next_state_list = [agt.reshape(sumo_cfg, next_obs_dict) for agt in agent_list]
    for agt, state, reward, next_state in zip(agent_list, state_list, reward_list, next_state_list):
      agt.remember(state, action, reward, next_state, env_state)
      agt.learn(state, action, reward, next_state, env_state)
    print(action_dict, reward_list, env_state)
    # randomly hand control back and forth between the agent and SUMO
    if random.uniform(0, 1) < 0.1:
      if env.agt_ctrl == True:
        env.agt_ctrl = False
      else:
        env.agt_ctrl = True
    if env_state != EnvState.NORMAL:
      print("episode: {}/{}, step: {}"
            .format(e, EPISODES, step))
      break
    # experience replay and periodic checkpointing
    for agt in agent_list:
      if len(agt.memory) > batch_size:
        agt.replay(batch_size)
      if e % 10 == 0:
        agt.save()
    state_list = next_state_list
    #print("memory: ", agt.memory)
    #print("lane_change: ", ActionLaneChange(action//7), "accel_level: ", ActionAccel(action%7))
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,400 | pulinau/sumo_rl_driving | refs/heads/main | /test.py | from include import *
from sumo_cfgs import *
from sumo_gym import *
from observation import *
from action import *
# Manual smoke test: drive the roundabout scenario in the SUMO GUI with SUMO
# controlling the ego car, printing the observation stream.
SUMO_BIN = "/home/ken/project/sumo-bin/bin/sumo-gui"
NET_XML_FILE = "/home/ken/project/sumo-rl/sumo_openai_gym/roundabout/roundabout.net.xml"
ROU_XML_FILE_LIST = ["/home/ken/project/sumo-rl/sumo_openai_gym/roundabout/test" + str(i) + ".rou.xml" for i in range(1)]
SUMO_CMD = [SUMO_BIN,
            #"-c", "/home/ken/project/sumo-rl/sumo_openai_gym/traffic/test.sumocfg",
            "--no-warnings", "true",
            "--time-to-teleport", "-1",
            "--collision.action", "warn",
            "--collision.check-junctions", "true",
            "--xml-validation", "never",
            "--step-length", str(SUMO_TIME_STEP),
            "-n", NET_XML_FILE,
            "-r"]
# all remaining parameters come from sumo_cfgs via the star import
sumo_cfg = SumoCfg(
  # sumo
  SUMO_CMD,
  SUMO_TIME_STEP,
  NET_XML_FILE,
  ROU_XML_FILE_LIST,
  EGO_VEH_ID,
  MAX_VEH_ACCEL,
  MAX_VEH_DECEL,
  MAX_VEH_SPEED,
  # observation
  NUM_LANE_CONSIDERED,
  NUM_VEH_CONSIDERED,
  MAX_TTC_CONSIDERED,
  OBSERVATION_RADIUS,
  # reward
  MAX_COMFORT_ACCEL_LEVEL,
  MAX_COMFORT_DECEL_LEVEL,
  DEFAULT_COLOR,
  YIELD_COLOR)
sumo_cfg.SUMO_CMD = SUMO_CMD  # NOTE(review): redundant — the constructor already stored it
env = MultiObjSumoEnv(sumo_cfg)
obs = env.reset(0)
env.agt_ctrl = False  # let SUMO's driver model control the ego car
for _ in range(600):
  obs, reward_list, env_state, action_dict = \
    env.step({"lane_change": ActionLaneChange.NOOP, "accel_level": ActionAccel.NOOP})
  if env_state != EnvState.NORMAL:
    env.reset(0)
  print(obs)
54,401 | pulinau/sumo_rl_driving | refs/heads/main | /traffic/generate.py | #!python3
import subprocess
import random
import os
randomTrip = "~/project/sumo-bin/tools/randomTrips.py"
#mean = "4"
#" --binomial=" + mean + \
# Generate route files test3000..test3999: each combines a dense initial
# burst (temp0) with a sparser long-running flow (temp1), then splices in
# the ego vehicle definition.
for i in range(3000, 4000):
  # dense burst: very small period, trips end at t=0.1
  period = random.randrange(1, 4)/500
  temp0_file_name = "temp0.rou.xml"
  randomTripArg = " -n test.net.xml -r " + temp0_file_name + \
                  " -e 0.1" + \
                  " -p " + str(period) + \
                  ' --fringe-factor 10 --trip-attributes="departLane=\\"random\\" departSpeed=\\"random\\" departPos=\\"random_free\\""'
  subprocess.run(randomTrip + randomTripArg, shell=True, check=True)
  os.remove("trips.trips.xml")
  # sparse background flow between t=0.2 and t=640
  period = random.randrange(1, 10)/5
  temp1_file_name = "temp1.rou.xml"
  randomTripArg = " -n test.net.xml -r " + temp1_file_name + \
                  ' --prefix 1_' + \
                  " -b 0.2 -e 640" + \
                  " -p " + str(period) + \
                  ' --fringe-factor 10 --trip-attributes="departLane=\\"random\\" departSpeed=\\"random\\" departPos=\\"base\\""'
  subprocess.run(randomTrip + randomTripArg, shell=True, check=True)
  # splice: keep temp0's body (minus closing tag), append temp1's vehicles,
  # and insert the EgoCar type + rename the first burst vehicle to "ego";
  # line offsets 33/34 assume randomTrips.py's fixed output header layout
  with open(temp0_file_name, "r+") as fd_in0, open(temp1_file_name, "r+") as fd_in1, open("test"+str(i)+".rou.xml", "w+") as fd_out:
    contents0 = fd_in0.readlines()
    contents1 = fd_in1.readlines()
    contents = contents0[:-1] + contents1[33:]
    contents.insert(33, ' <vType id="EgoCar" color="0,1,0"/>\n')
    contents[34] = ' <vehicle id="ego" depart="0.00" departLane="random" departPos="random" departSpeed="random" type="EgoCar">\n'
    fd_out.writelines(contents)
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,402 | pulinau/sumo_rl_driving | refs/heads/main | /workers.py | #!python3
__author__ = "Changjian Li"
from include import *
from action import *
from observation import *
from sumo_gym import *
from dqn import *
import random
random.seed()
import multiprocessing as mp
import queue
from copy import deepcopy
import os
from sumo_cfgs import sumo_cfg
class returnX():
  """A constant function object: calling the instance always yields the value
  supplied at construction time, regardless of any arguments passed."""
  def __init__(self, x):
    self.x = x
  def __call__(self, *_args, **_kwargs):
    # arguments are accepted for call-site compatibility but ignored
    return self.x
from collections import deque
class decreaseProb():
  """Sigmoid-shaped decreasing probability: p(x) = 1 / (1 + exp(alpha * (x - beta))).

  Decreases monotonically in x; p(beta) == 0.5 and alpha controls the steepness.
  """
  def __init__(self, alpha, beta):
    self.alpha = alpha  # steepness of the sigmoid
    self.beta = beta    # midpoint, where the probability equals 0.5
  def __call__(self, x):
    # Bug fix: the original referenced the bare name `beta`, which is undefined
    # at call time (NameError); use the stored attribute instead.
    return 1 / (1 + np.exp(self.alpha * (x - self.beta)))
def run_env(sumo_cfg, dqn_cfg_list, obs_q_list, action_q_list, traj_q_list, play, max_ep, id):
  """Simulation worker: run up to max_ep SUMO episodes and collect trajectories.

  Observations are sent to the per-objective DQN workers through obs_q_list,
  (action_set, sorted_idx) replies come back through action_q_list, and each
  finished episode's trajectory is pushed (one per-objective copy) into
  traj_q_list.

  :param sumo_cfg: configuration passed to MultiObjSumoEnv
  :param dqn_cfg_list: one DQN config per objective, ordered by priority
  :param obs_q_list: queues used to send observations to each DQN worker
  :param action_q_list: queues carrying the workers' action replies
  :param traj_q_list: queues used to deliver the per-objective trajectories
  :param play: True for evaluation (fixed start, agent control on), False for training
  :param max_ep: number of episodes to run
  :param id: worker id, used for logging and the "result<id>" output file
  Side effect: appends per-episode violation statistics to the file "result<id>".
  """
  try:
    max_step = 3200  # hard per-episode step cap; exceeding it counts as a yield violation
    env = MultiObjSumoEnv(sumo_cfg)
    violation_safety_hist = []
    violation_yield_hist = []
    violation_turn_hist = []
    for ep in range(max_ep):
      print("env id: {}".format(id), "episode: {}/{}".format(ep, max_ep))
      if play:
        init_step = 0
        model_index_list = [None] * len(dqn_cfg_list)
      else:
        # randomize the episode start and, per objective, possibly pick one of
        # the alternative ("reset") models to generate behaviour with
        init_step = random.randrange(60)
        model_index_list = [None] * len(dqn_cfg_list)
        for i in range(len(dqn_cfg_list)):
          if len(dqn_cfg_list[i].model_rst_prob_list) > 0:
            model_index_list[i] = random.randrange(len(dqn_cfg_list[i].model_rst_prob_list))
      obs_dict = env.reset(init_step)
      traj = []
      for step in range(max_step):
        violated_safety = False
        violated_yield = False
        violated_turn = False
        if step == 0:
          if play:
            env.agt_ctrl = True
          """
          else:
            if random.uniform(0, 1) < 0.1:
              env.agt_ctrl = False
        else:
          if not play:
            if random.uniform(0, 1) < 0.5:
              if env.agt_ctrl == False:
                env.agt_ctrl = True
              else:
                env.agt_ctrl = False
          """
        # select action
        if step == 0:
          # first step: no cached "next action" yet, query the workers directly
          if env.agt_ctrl == False:
            action = 0
            action_info = "sumo"
          else:
            action_set_list, sorted_idx_list = [], []
            for obs_q, model_index in zip(obs_q_list, model_index_list):
              obs_q.put((deepcopy(obs_dict), model_index))
            for action_q in action_q_list:
              # busy-wait until this objective's worker replies
              while True:
                try:
                  (action_set, sorted_idx) = action_q.get(block=False)
                  break
                except queue.Empty:
                  continue
              action_set_list += [action_set]
              sorted_idx_list += [sorted_idx]
            is_explr_list = [False] * len(dqn_cfg_list)
            i = random.randrange(len(dqn_cfg_list))
            important = False
            if not play and random.random() < dqn_cfg_list[i].epsilon:
              # epsilon-greedy: one randomly chosen objective explores
              is_explr_list[i] = True
              important = True
            action, action_info = select_action(dqn_cfg_list, is_explr_list, action_set_list, sorted_idx_list, 1)
        else:
          # later steps reuse the action selected at the end of the previous iteration
          action = next_action
          action_info = next_action_info
          important = next_important
        # remap reduced action ids 7/8 to full action ids 10/17
        # (full id = lane_change * len(ActionAccel) + accel_level, see env.step below)
        if action == 7:
          action_full = 10
        elif action == 8:
          action_full = 17
        else:
          action_full = action
        next_obs_dict, (reward_list, done_list, violation_list), env_state, action_dict = \
          env.step({"lane_change": ActionLaneChange(action_full // len(ActionAccel)),
                    "accel_level": ActionAccel(action_full % len(ActionAccel))})
        # the env may override the requested action; recover the reduced id actually taken
        action_full = action_dict["lane_change"].value * len(ActionAccel) + action_dict["accel_level"].value
        if action_full >= len(ActionAccel) and action_full < 2 * len(ActionAccel):
          action = 7
        if action_full >= 2 * len(ActionAccel):
          action = 8
        violated_safety = violated_safety or violation_list[0]
        violated_yield = violated_yield or violation_list[1]
        violated_turn = violated_turn or violation_list[2]
        if True: # play:
          print("action: ", action)
        if env.agt_ctrl == False:
          action_info = "sumo"
        if step % 1 == 0 or action >= len(ActionAccel):  # NOTE: step % 1 == 0 is always true
          # choose tentative actions for each objective
          for obs_q in obs_q_list:
            obs_q.put((deepcopy(next_obs_dict), None))
          action_set_list, sorted_idx_list = [], []
          tent_action_list = []
          tent_action_info_list = []
          for i, action_q in enumerate(action_q_list):
            while True:
              try:
                (action_set, sorted_idx) = action_q.get(block=False)
                break
              except queue.Empty:
                continue
            action_set_list += [action_set]
            sorted_idx_list += [sorted_idx]
            # tentative action considering only the first i+1 objectives
            tent_action, tent_action_info = select_action(dqn_cfg_list[:i + 1], [False] * (i + 1), action_set_list,
                                                          sorted_idx_list, 1)
            tent_action_list += [tent_action]
            tent_action_info_list += [tent_action_info]
          # choose next action using model[model_index]
          action_set_list, sorted_idx_list = [], []
          for obs_q, model_index in zip(obs_q_list, model_index_list):
            obs_q.put((deepcopy(next_obs_dict), model_index))
          for action_q in action_q_list:
            while True:
              try:
                (action_set, sorted_idx) = action_q.get(block=False)
                break
              except queue.Empty:
                continue
            action_set_list += [action_set]
            sorted_idx_list += [sorted_idx]
          is_explr_list = [False] * len(dqn_cfg_list)
          next_important = False
          i = random.randrange(len(dqn_cfg_list))
          if not play and random.random() < dqn_cfg_list[i].epsilon:
            is_explr_list[i] = True
            next_important = True
          next_action, next_action_info = select_action(dqn_cfg_list, is_explr_list, action_set_list, sorted_idx_list,
                                                        1)
        if env_state != EnvState.DONE:
          traj.append((obs_dict, action, reward_list, next_obs_dict, tent_action_list, done_list, important))
        obs_dict = next_obs_dict
        if env_state == EnvState.DONE:
          prob = returnX(1)
          print("Sim ", id, " success, step: ", step)
          break
        if env_state != EnvState.NORMAL:
          prob = returnX(1)
          print("Sim ", id, " terminated, step: ", step, action_dict, action_info, reward_list, done_list, env_state,
                env.agt_ctrl)
          break
        if step == max_step - 1:
          # hitting the step cap is treated as a yield violation
          prob = returnX(1)
          print("Sim ", id, " timeout, step: ", step)
          violated_yield = True
          break
      # hand each objective its own view of the trajectory (its reward/done/tentative action)
      for i, traj_q in enumerate(traj_q_list):
        traj_q.put(
          ([deepcopy((obs_dict, action, reward_list[i], next_obs_dict, tent_action_list[i], done_list[i], important))
            for obs_dict, action, reward_list, next_obs_dict, tent_action_list, done_list, important in traj],
           prob))
      violation_safety_hist += [violated_safety]
      violation_yield_hist += [violated_yield]
      violation_turn_hist += [violated_turn]
  except:
    raise
  finally:
    # always dump the violation statistics gathered so far
    f = open("result" + str(id), "a")
    f.writelines(["safety violation: " + str(violation_safety_hist) + "\n"])
    f.writelines(["regulation violation (yield): " + str(violation_yield_hist) + "\n"])
    f.writelines(["regulation violation (turn): " + str(violation_turn_hist) + "\n"])
    f.close()
def select_action(dqn_cfg_list, is_explr_list, action_set_list, sorted_idx_list, num_action, greedy=False):
  """
  Select an action based on the action choice of each objective.
  :param dqn_cfg_list: DQN configuration of each objective, ordered by priority (highest first)
  :param is_explr_list: for each objective, whether it should take a random exploratory action
  :param action_set_list: list of "good enough" actions of each objective
  :param sorted_idx_list: list of sorted actions based on (descending) desirability of each objective,
    used in case there's no "good enough" action that satisfies all objectives
  :param num_action: the least num of action that's assumed to exist
  :param greedy: if True, deterministically pick the most desirable remaining action
  :return: (action, info) tuple, where info is a string describing how the action was chosen
  """
  valid = set(range(reduced_action_size))
  for action_set, is_explr, sorted_idx, dqn_cfg in zip(action_set_list, is_explr_list, sorted_idx_list, dqn_cfg_list):
    if is_explr:
      # Bug fix: random.sample() on a set is deprecated since Python 3.9 and
      # raises TypeError on 3.11+; convert to a sequence before choosing.
      return (random.choice(tuple(valid)), "explr: " + dqn_cfg.name)
    invalid = valid - action_set
    valid = valid & action_set
    if len(valid) < num_action:
      # not enough actions satisfy every objective so far: compromise by taking
      # the least-bad rejected actions, ranked by this objective's preference
      invalid = [(sorted_idx.index(x), x) for x in invalid]
      invalid = sorted(invalid)[:num_action - len(valid)]
      invalid = set([x[1] for x in invalid])
      invalid = [(x, "compromise: " + dqn_cfg.name) for x in invalid]
      break
  else:
    invalid = []
  if greedy:
    if len(valid) == 0:
      return invalid[0]
    else:
      # most desirable valid action according to the last objective's ranking
      valid = [(sorted_idx.index(x), x) for x in valid]
      valid = [(sorted(valid)[0][1], "greedy: " + dqn_cfg.name)]
      return valid[0]
  valid = [(x, "exploit: " + dqn_cfg.name) for x in valid]
  # uniformly pick among the surviving valid actions plus any compromises
  return random.choice(valid + invalid)
def run_QAgent(sumo_cfg, dqn_cfg, pretrain_traj_list, obs_q_list, action_q_list, traj_q_list, cuda_vis_devs):
  """DQN worker loop for a single objective.

  Serves action requests arriving on obs_q_list (replying on the matching
  action_q_list entry), absorbs finished trajectories from traj_q_list into
  replay memory, and interleaves replay training with periodic target-network
  updates and model checkpoints.

  :param sumo_cfg: SUMO configuration handed to DQNAgent
  :param dqn_cfg: DQN configuration of this objective
  :param pretrain_traj_list: not used in this loop — presumably consumed elsewhere; TODO confirm
  :param obs_q_list: one observation queue per simulation worker
  :param action_q_list: one action-reply queue per simulation worker
  :param traj_q_list: queues carrying finished episode trajectories
  :param cuda_vis_devs: value assigned to the CUDA_VISIBLE_DEVICES env variable
  Runs forever; intended to be spawned as a separate process.
  """
  try:
    os.environ['CUDA_VISIBLE_DEVICES'] = cuda_vis_devs
    agt = DQNAgent(sumo_cfg, dqn_cfg)
    ep = 0
    step = -1  # counts answered action requests
    while True:
      # answer any pending action requests (non-blocking poll)
      for obs_q, action_q in zip(obs_q_list, action_q_list):
        try:
          obs_dict, model_index = obs_q.get(block=False)
          action_q.put(agt.select_actions(obs_dict, model_index=model_index))
          step += 1
        except queue.Empty:
          continue
      # absorb any finished trajectories into replay memory
      for traj_q in traj_q_list:
        try:
          traj, prob = traj_q.get(block=False)
          agt.remember(traj, prob)
        except queue.Empty:
          continue
      # if agt.name == 'regulation' or agt.name == 'safety':
      #   print("training ", agt.name, " episode: {}".format(ep))
      if random.random() < 1:  # always true: run a replay step every iteration
        agt.replay()
      if ep % 500 == 500 - 1:
        # periodically sync the target network and checkpoint the model
        agt.update_target()
        agt.save_model()
      ep += 1
      if step % 40000 < len(obs_q_list):
        # roughly every 40000 served requests, save a numbered snapshot
        agt.save_model(suffix=str(step//40000))
  except:
    raise
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,403 | pulinau/sumo_rl_driving | refs/heads/main | /observation.py | #!python3
__author__ = "Changjian Li"
import heapq
from copy import deepcopy
from include import *
def get_observation_space(env):
  """Build the gym observation space for the SUMO driving environment.

  Ego features are scalar boxes / discretes; every per-surrounding-vehicle
  feature is a vector (or matrix) sized by env.NUM_VEH_CONSIDERED. Bounds come
  from the env constants (MAX_VEH_SPEED, OBSERVATION_RADIUS, etc.).
  """
  observation_space = spaces.Dict({"ego_speed": spaces.Box(0, env.MAX_VEH_SPEED, shape=(1,), dtype=np.float32),
                                   "ego_dist_to_end_of_lane": spaces.Box(0, env.OBSERVATION_RADIUS, shape=(1,), dtype=np.float32),
                                   "ego_in_intersection": spaces.Discrete(2),
                                   "ego_exists_left_lane": spaces.Discrete(2),
                                   "ego_exists_right_lane": spaces.Discrete(2),
                                   "ego_edge_changed": spaces.Discrete(2),
                                   "ego_has_priority": spaces.Discrete(2),
                                   "ego_priority_changed": spaces.Discrete(2),
                                   # signed lane offset to the nearest "correct" lane, clamped to NUM_LANE_CONSIDERED
                                   "ego_correct_lane_gap": spaces.Box(-env.NUM_LANE_CONSIDERED, env.NUM_LANE_CONSIDERED, shape=(1,), dtype=np.int16),
                                   "exists_vehicle": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "is_new": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "collision": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "relative_speed": spaces.Box(-2* env.MAX_VEH_SPEED, env.MAX_VEH_SPEED, (env.NUM_VEH_CONSIDERED,), dtype=np.float32), # relative speed projected onto ego speed, not symmetric
                                   "dist_to_end_of_lane": spaces.Box(0, env.OBSERVATION_RADIUS, (env.NUM_VEH_CONSIDERED,), dtype=np.float32),
                                   "right_signal": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "left_signal": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "brake_signal": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "in_intersection": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   # position in ego coordinates, one (x, y) row per vehicle
                                   "relative_position": spaces.Box(-env.OBSERVATION_RADIUS, env.OBSERVATION_RADIUS, (env.NUM_VEH_CONSIDERED, 2), dtype=np.float32),
                                   "relative_heading": spaces.Box(-np.pi, np.pi, (env.NUM_VEH_CONSIDERED,), dtype=np.float32), # anti-clockwise
                                   "has_priority": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   # one-hot-ish relation flags between each vehicle and ego
                                   "veh_relation_peer": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "veh_relation_conflict": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "veh_relation_next": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "veh_relation_prev": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "veh_relation_left": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "veh_relation_right": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "veh_relation_ahead": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "veh_relation_behind": spaces.MultiBinary(env.NUM_VEH_CONSIDERED),
                                   "veh_relation_none": spaces.MultiBinary(env.NUM_VEH_CONSIDERED), # none of the above
                                   "ttc": spaces.Box(0, env.MAX_TTC_CONSIDERED, (env.NUM_VEH_CONSIDERED,), dtype=np.float32)
                                   })
  return observation_space
def get_veh_dict(env):
  """Collect the current traci state of every vehicle in the simulation.

  Returns a dict mapping vehicle id to a per-vehicle state dict containing
  position, angle, speed, lane/edge bookkeeping, turn/brake indicator bits
  and the next normal (non-internal) edge on the vehicle's route.
  """
  vehicle_api = env.tc.vehicle
  veh_dict = {}
  for veh_id in vehicle_api.getIDList():
    state = {}
    state["type"] = "vehicle"
    state["position"] = vehicle_api.getPosition(veh_id)
    state["angle"] = vehicle_api.getAngle(veh_id)  # in degree. North is zero, clockwise
    state["speed"] = vehicle_api.getSpeed(veh_id)
    state["dimension"] = (vehicle_api.getLength(veh_id), vehicle_api.getWidth(veh_id))
    state["edge_id"] = vehicle_api.getRoadID(veh_id)
    state["lane_id"] = vehicle_api.getLaneID(veh_id)
    state["lane_index"] = vehicle_api.getLaneIndex(veh_id)
    state["lane_length"] = env.tc.lane.getLength(state["lane_id"])
    state["lane_position"] = vehicle_api.getLanePosition(veh_id)  # position in the lane
    state["route"] = vehicle_api.getRoute(veh_id)
    # indicator bits of the traci signal bitmask (equivalent to the original
    # bin()-string indexing: bit 0 = right blinker, bit 1 = left, bit 3 = brake)
    signals = vehicle_api.getSignals(veh_id) + 16  # keep parity with original (force > 5 bits)
    state["right_signal"] = signals & 1
    state["left_signal"] = (signals >> 1) & 1
    state["brake_signal"] = (signals >> 3) & 1
    route = state["route"]  # route is an edge id list of the vehicle's route
    route_index = vehicle_api.getRouteIndex(veh_id)
    if len(route) > route_index + 1:
      state["next_normal_edge_id"] = route[route_index + 1]
    else:
      state["next_normal_edge_id"] = None
    veh_dict[veh_id] = state
  return veh_dict
def get_lanelet_dict(sumo_net_xml_file):
  """Parse a SUMO .net.xml file into a per-lane topology dict.

  For every lane (including internal ':'-prefixed intersection lanes) the
  returned dict records waypoints, endpoint nodes, edge id/priority, lane
  index, successor lane lists ("next"), predecessor lane lists ("prev"),
  and left/right neighbour lanes on the same edge.
  """
  net = sumolib.net.readNet(sumo_net_xml_file, withInternal=True)
  lanelet_dict = {}
  edges = net.getEdges()
  # pass 1: create an entry per lane so predecessor lists can be appended to later
  for edge in edges:
    # add "next" and "previous" connection
    # since there's no direct sumolib API to get previous lane, we need to do some tricks here
    for lane in edge.getLanes():
      lane_id = lane.getID()
      lanelet_dict[lane_id] = {}
      lanelet_dict[lane_id]["prev_lane_id_list"] = []
      lanelet_dict[lane_id]["prev_normal_lane_id_list"] = []
  # pass 2: fill static attributes and forward connectivity, and back-fill
  # the predecessor lists of the successor lanes
  for edge in edges:
    for lane in edge.getLanes():
      lane_id = lane.getID()
      lane_index = lane.getIndex()
      lanelet_dict[lane_id]["waypoint"] = lane.getShape()
      lanelet_dict[lane_id]["from_node_id"] = edge.getFromNode().getID()
      lanelet_dict[lane_id]["to_node_id"] = edge.getToNode().getID()
      lanelet_dict[lane_id]["edge_id"] = edge.getID()
      lanelet_dict[lane_id]["lane_index"] = lane_index
      if lane_id[0] != ':':
        # internal lanes get their priority later, inherited from predecessors
        lanelet_dict[lane_id]["edge_priority"] = edge.getPriority()
      lanelet_dict[lane_id]["next_normal_lane_id_list"] = [conn.getToLane().getID() for conn in lane.getOutgoing()]
      if lane_id[0] == ':':
        lanelet_dict[lane_id]["next_lane_id_list"] = [conn.getToLane().getID() for conn in lane.getOutgoing()]
      else:
        # normal lanes reach the next normal lane via an internal (via) lane
        lanelet_dict[lane_id]["next_lane_id_list"] = [conn.getViaLaneID() for conn in lane.getOutgoing()]
      for next_lane_id in lanelet_dict[lane_id]["next_normal_lane_id_list"] + lanelet_dict[lane_id]["next_lane_id_list"]:
        lanelet_dict[next_lane_id]["prev_normal_lane_id_list"] += [lane_id]
      for next_lane_id in lanelet_dict[lane_id]["next_lane_id_list"]:
        lanelet_dict[next_lane_id]["prev_lane_id_list"] += [lane_id]
      if lane_index == len(edge.getLanes()) - 1:
        lanelet_dict[lane_id]["left_lane_id"] = None
      else:
        lanelet_dict[lane_id]["left_lane_id"] = edge.getLanes()[lane_index+1].getID()
      if lane_index == 0:
        lanelet_dict[lane_id]["right_lane_id"] = None
      else:
        lanelet_dict[lane_id]["right_lane_id"] = edge.getLanes()[lane_index-1].getID()
  # "left" and "right" connections for opposite direction lane are not added
  # edge priority of internal lane inherits that of the previous lane
  # pass 3: internal lanes with a normal predecessor inherit its priority
  for edge in edges:
    for lane in edge.getLanes():
      lane_id = lane.getID()
      if lane_id[0] == ':':
        if len(lanelet_dict[lane_id]["prev_normal_lane_id_list"]) > 0:
          lanelet_dict[lane_id]["edge_priority"] = lanelet_dict[lanelet_dict[lane_id]["prev_normal_lane_id_list"][0]]["edge_priority"]
  # pass 4: remaining internal lanes inherit from any predecessor, or fall
  # back to the default priority
  for edge in edges:
    for lane in edge.getLanes():
      lane_id = lane.getID()
      if lane_id[0] == ':':
        if len(lanelet_dict[lane_id]["prev_normal_lane_id_list"]) == 0:
          if len(lanelet_dict[lane_id]["prev_lane_id_list"]) > 0:
            lanelet_dict[lane_id]["edge_priority"] = lanelet_dict[lanelet_dict[lane_id]["prev_lane_id_list"][0]]["edge_priority"]
          else:
            lanelet_dict[lane_id]["edge_priority"] = 1 # 1 is the default edge priority
  return lanelet_dict
def get_edge_dict(sumo_net_xml_file):
  """Parse a SUMO .net.xml file into a per-edge lookup.

  Each entry maps an edge id to its lane id list and the ids of its
  from/to junction nodes. Internal edges are included.
  """
  net = sumolib.net.readNet(sumo_net_xml_file, withInternal=True)
  edge_dict = {}
  for edge in net.getEdges():
    edge_dict[edge.getID()] = {
      "lane_id_list": [lane.getID() for lane in edge.getLanes()],
      "from_node_id": edge.getFromNode().getID(),
      "to_node_id": edge.getToNode().getID(),
    }
  return edge_dict
def get_obs_dict(env):
  """Assemble the observation dict for the current simulation step.

  Combines the live vehicle states (get_veh_dict) with the static lane/edge
  topology cached on env to produce ego features plus fixed-length
  per-vehicle feature lists of size env.NUM_VEH_CONSIDERED. Vehicles already
  present in the previous observation keep their slot index; newly seen
  vehicles fill the remaining slots. As a side effect, vehicle colors are
  reset via traci and vehicles with priority over ego are recolored.
  """
  veh_dict = get_veh_dict(env)
  # set the color to default
  [env.tc.vehicle.setColor(k, env.DEFAULT_COLOR) for k, v in veh_dict.items() if k!=env.EGO_VEH_ID]
  lanelet_dict = deepcopy(env.lanelet_dict)
  edge_dict = deepcopy(env.edge_dict)
  if len(env.obs_dict_hist) > 0:
    old_obs_dict = deepcopy(env.obs_dict_hist[-1])
  else:
    old_obs_dict = None
  obs_dict = {}
  ego_dict = veh_dict[env.EGO_VEH_ID]
  lane_id_list_ego_edge = edge_dict[ego_dict["edge_id"]]["lane_id_list"]
  if ego_dict["next_normal_edge_id"] != None:
    lane_id_list_ego_next_normal_edge = edge_dict[ego_dict["next_normal_edge_id"]]["lane_id_list"]
  else:
    lane_id_list_ego_next_normal_edge = []
  # --- ego features ---
  obs_dict["ego_speed"] = ego_dict["speed"]
  obs_dict["ego_dist_to_end_of_lane"] = min(ego_dict["lane_length"] - ego_dict["lane_position"], env.OBSERVATION_RADIUS)
  # lanes inside intersections have ids that start with ":"
  if ego_dict["lane_id"][0] == ":":
    obs_dict["ego_in_intersection"] = 1
  else:
    obs_dict["ego_in_intersection"] = 0
  # couldChangeLane has a time lag of one step, a workaround is needed until this is fixed
  #if env.tc.vehicle.couldChangeLane(env.EGO_VEH_ID, 1):
  if ego_dict["lane_index"] < len(lane_id_list_ego_edge)-1:
    obs_dict["ego_exists_left_lane"] = 1
  else:
    obs_dict["ego_exists_left_lane"] = 0
  if ego_dict["lane_index"] != 0:
    obs_dict["ego_exists_right_lane"] = 1
  else:
    obs_dict["ego_exists_right_lane"] = 0
  # correct lane
  # if next normal edge doesn't exist, consider ego to be already in correct lane
  obs_dict["ego_correct_lane_gap"] = 0
  min_lane_gap = None
  for x in lane_id_list_ego_next_normal_edge:
    for y in lane_id_list_ego_edge:
      if internal_lane_id_between_lanes(y, x, lanelet_dict) != None:
        # lane y of the current edge connects to the next edge: its offset
        # from ego's lane is a candidate gap to a "correct" lane
        lane_gap = lanelet_dict[y]["lane_index"] - ego_dict["lane_index"]
        if min_lane_gap is None or abs(lane_gap) < abs(min_lane_gap):
          min_lane_gap = lane_gap
  if min_lane_gap == None:
    obs_dict["ego_correct_lane_gap"] = 0
  else:
    obs_dict["ego_correct_lane_gap"] = min_lane_gap
  # clamp the gap to the number of lanes the observation considers
  if obs_dict["ego_correct_lane_gap"] > 0:
    obs_dict["ego_correct_lane_gap"] = min(obs_dict["ego_correct_lane_gap"], env.NUM_LANE_CONSIDERED)
  else:
    obs_dict["ego_correct_lane_gap"] = max(obs_dict["ego_correct_lane_gap"], -env.NUM_LANE_CONSIDERED)
  obs_dict["ego_edge_id"] = ego_dict["edge_id"]
  obs_dict["ego_edge_changed"] = 1
  if old_obs_dict is not None and old_obs_dict["ego_edge_id"] == obs_dict["ego_edge_id"]:
    obs_dict["ego_edge_changed"] = 0
  # vehicles inside region of insterest
  def in_ROI(ego_position, veh_position):
    # True when veh is within OBSERVATION_RADIUS (euclidean) of ego
    if (np.linalg.norm([veh_position[0]-ego_position[0], veh_position[1]-ego_position[1]]) < env.OBSERVATION_RADIUS):
      return True
    return False
  veh_id_list_ROI = [k for k, v in veh_dict.items() if k!=env.EGO_VEH_ID and in_ROI(ego_dict["position"], v["position"])]
  # now deal with the relavant vehicles
  # --- per-vehicle feature slots, preallocated to fixed length ---
  # NB: the rows of "relative_position" initially alias one list object, but
  # each slot is later replaced wholesale, never mutated in place
  obs_dict["veh_ids"] = [None] * env.NUM_VEH_CONSIDERED
  obs_dict["is_new"] = [1] * env.NUM_VEH_CONSIDERED
  obs_dict["collision"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_next"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_prev"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["exists_vehicle"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["speed"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["relative_speed"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["dist_to_end_of_lane"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["right_signal"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["left_signal"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["brake_signal"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["in_intersection"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["relative_position"] = [[-env.OBSERVATION_RADIUS, -env.OBSERVATION_RADIUS]] * env.NUM_VEH_CONSIDERED
  obs_dict["relative_heading"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["has_priority"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_peer"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_conflict"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_left"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_right"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_ahead"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_behind"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_none"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["ttc"] = [env.MAX_TTC_CONSIDERED] * env.NUM_VEH_CONSIDERED
  # sort veh within ROI by distance to ego
  veh_heap = []
  for veh_id in veh_id_list_ROI:
    state_dict = veh_dict[veh_id]
    heapq.heappush(veh_heap, (np.linalg.norm(np.array(state_dict["position"]) - np.array(ego_dict["position"])), veh_id))
  veh_id_list_ROI = []
  # keep only the NUM_VEH_CONSIDERED nearest vehicles
  for _ in range(min(env.NUM_VEH_CONSIDERED, len(veh_heap))):
    _, veh_id = heapq.heappop(veh_heap)
    veh_id_list_ROI += [veh_id]
  # split into vehicles already tracked last step (keep their slot) vs new ones
  if old_obs_dict is None:
    old_veh_id_list = list(set())
    new_veh_id_list = list(set(veh_id_list_ROI))
  else:
    old_veh_id_list = list(set(veh_id_list_ROI) & set(old_obs_dict["veh_ids"]))
    new_veh_id_list = list(set(veh_id_list_ROI) - set(old_obs_dict["veh_ids"]))
  new_index = 0
  for veh_id in old_veh_id_list + new_veh_id_list:
    state_dict = veh_dict[veh_id]
    if veh_id in old_veh_id_list:
      # reuse the slot this vehicle occupied in the previous observation
      veh_index = old_obs_dict["veh_ids"].index(veh_id)
    else:
      # find the next free (still "new") slot
      while obs_dict["is_new"][new_index] == 0:
        new_index += 1
      veh_index = new_index
      new_index += 1
    # NEXT, PREV
    if state_dict["lane_id"] in lanelet_dict[ego_dict["lane_id"]]["next_lane_id_list"]:
      obs_dict["veh_relation_next"][veh_index] = 1 # NEXT
    if ego_dict["lane_id"] in lanelet_dict[state_dict["lane_id"]]["next_lane_id_list"]:
      obs_dict["veh_relation_prev"][veh_index] = 1 # PREV
    # if not approaching the same intersection, these are completely irrelevant, thus can be ignored
    if edge_dict[ego_dict["edge_id"]]["to_node_id"] != edge_dict[state_dict["edge_id"]]["to_node_id"] and \
       obs_dict["veh_relation_next"][veh_index] == 0 and \
       obs_dict["veh_relation_prev"][veh_index] == 0:
      if veh_id in new_veh_id_list:
        # if a vehicle is completely irrelevant, then restore the counter
        new_index = veh_index
      continue
    if veh_id in old_veh_id_list:
      obs_dict["is_new"][veh_index] = 0
    obs_dict["veh_ids"][veh_index] = veh_id
    if veh_id in env.tc.simulation.getCollidingVehiclesIDList():
      obs_dict["collision"][veh_index] = 1
    obs_dict["exists_vehicle"][veh_index] = 1
    obs_dict["speed"][veh_index] = state_dict["speed"]
    obs_dict["dist_to_end_of_lane"][veh_index] = min(state_dict["lane_length"] - state_dict["lane_position"], env.OBSERVATION_RADIUS)
    obs_dict["right_signal"][veh_index] = state_dict["right_signal"]
    obs_dict["left_signal"][veh_index] = state_dict["left_signal"]
    obs_dict["brake_signal"][veh_index] = state_dict["brake_signal"]
    if state_dict["edge_id"][0] == ':':
      obs_dict["in_intersection"][veh_index] = 1
    # transform the position to ego coordinate
    ego_angle_rad = ego_dict["angle"]/180 * np.pi
    rotation_mat = np.array([[np.cos(ego_angle_rad), -np.sin(ego_angle_rad)],
                             [np.sin(ego_angle_rad), np.cos(ego_angle_rad)]])
    relative_position = np.array(state_dict["position"]) - np.array(ego_dict["position"])
    relative_position = np.matmul(rotation_mat, relative_position)
    obs_dict["relative_position"][veh_index] = relative_position
    relative_heading = -(state_dict["angle"] - ego_dict["angle"])/180 * np.pi # anti-clockwise
    # normalize to (-pi, pi]
    if relative_heading > np.pi:
      relative_heading -= 2*np.pi
    elif relative_heading < -np.pi:
      relative_heading += 2*np.pi
    obs_dict["relative_heading"][veh_index] = relative_heading
    obs_dict["relative_speed"][veh_index] = state_dict["speed"] * np.cos(obs_dict["relative_heading"][veh_index]) - ego_dict["speed"]
    # LEFT, RIGHT
    if (state_dict["lane_id"] == lanelet_dict[ego_dict["lane_id"]]["left_lane_id"]
        ) or (lanelet_dict[ego_dict["lane_id"]]["left_lane_id"] is not None and
              state_dict["lane_id"] in lanelet_dict[lanelet_dict[ego_dict["lane_id"]]["left_lane_id"]]["next_lane_id_list"]):
      obs_dict["veh_relation_left"][veh_index] = 1 # LEFT
    if (state_dict["lane_id"] == lanelet_dict[ego_dict["lane_id"]]["right_lane_id"]
        ) or (lanelet_dict[ego_dict["lane_id"]]["right_lane_id"] is not None and
              state_dict["lane_id"] in lanelet_dict[lanelet_dict[ego_dict["lane_id"]]["right_lane_id"]]["next_lane_id_list"]):
      obs_dict["veh_relation_right"][veh_index] = 1 # RIGHT
    # AHEAD, BEHIND
    if (state_dict["lane_id"] == ego_dict["lane_id"] and state_dict["lane_position"] > ego_dict["lane_position"]
        ) or (state_dict["lane_id"] in lanelet_dict[ego_dict["lane_id"]]["next_lane_id_list"]):
      obs_dict["veh_relation_ahead"][veh_index] = 1 # AHEAD
    if (state_dict["lane_id"] == ego_dict["lane_id"] and state_dict["lane_position"] <= ego_dict["lane_position"]
        ) or (ego_dict["lane_id"] in lanelet_dict[state_dict["lane_id"]]["next_lane_id_list"]):
      obs_dict["veh_relation_behind"][veh_index] = 1 # BEHIND
    # check if the each of the possible relationship holds for the vehicle
    lane_id_list_veh_edge = edge_dict[state_dict["edge_id"]]["lane_id_list"]
    if state_dict["next_normal_edge_id"] != None:
      lane_id_list_veh_next_normal_edge = edge_dict[state_dict["next_normal_edge_id"]]["lane_id_list"]
    else:
      lane_id_list_veh_next_normal_edge = []
    # PEER if vehicles merges into the same lane from different lanes, since we only have edge (not lane) information
    # within the route, we need to search inside the next edge to see if there're any lanes whose previous lane
    # belongs to the current edge of veh
    if obs_dict["veh_relation_ahead"][veh_index] != 1 and \
       obs_dict["veh_relation_behind"][veh_index] != 1 and \
       obs_dict["veh_relation_left"][veh_index] != 1 and \
       obs_dict["veh_relation_right"][veh_index] != 1 and \
       state_dict["edge_id"] != ego_dict["edge_id"] and \
       state_dict["next_normal_edge_id"] == ego_dict["next_normal_edge_id"] and \
       ego_dict["next_normal_edge_id"] != None:
      for x in lane_id_list_ego_next_normal_edge:
        for y in lane_id_list_veh_edge:
          for z in lane_id_list_ego_edge:
            if internal_lane_id_between_lanes(y, x, lanelet_dict) != None and internal_lane_id_between_lanes(z, x, lanelet_dict) != None:
              obs_dict["veh_relation_peer"][veh_index] = 1 # PEER
    # CONFLICT if approaching/in the same intersection as the ego lane, and its route conflict that of the ego route
    if obs_dict["veh_relation_ahead"][veh_index] != 1 and \
       obs_dict["veh_relation_behind"][veh_index] != 1 and \
       obs_dict["veh_relation_left"][veh_index] != 1 and \
       obs_dict["veh_relation_right"][veh_index] != 1 and \
       state_dict["edge_id"] != ego_dict["edge_id"] and \
       state_dict["next_normal_edge_id"] != ego_dict["next_normal_edge_id"] and \
       ego_dict["next_normal_edge_id"] != None and \
       edge_dict[ego_dict["edge_id"]]["to_node_id"] == edge_dict[state_dict["edge_id"]]["to_node_id"]:
      # build the two forward waypoint polylines (current lane -> internal lane
      # -> next lane) and test them for a geometric crossing
      for u in lane_id_list_veh_next_normal_edge:
        for v in lane_id_list_veh_edge:
          lane_id0 = internal_lane_id_between_lanes(v, u, lanelet_dict)
          for p in lane_id_list_ego_next_normal_edge:
            for q in lane_id_list_ego_edge:
              lane_id1 = internal_lane_id_between_lanes(q, p, lanelet_dict)
              if lane_id0 != None and lane_id1 != None:
                if v[0] == ":":
                  waypoint0 = lanelet_dict[lane_id0]["waypoint"] + lanelet_dict[u]["waypoint"]
                else:
                  waypoint0 = lanelet_dict[v]["waypoint"] + lanelet_dict[lane_id0]["waypoint"] + lanelet_dict[u]["waypoint"]
                waypoint0 = np.array(waypoint0)
                # drop waypoints already behind the vehicle, then anchor the
                # polyline at its current position
                waypoint0 = waypoint0[np.argmin(np.linalg.norm(waypoint0 - np.array(state_dict["position"]))) + 1:]
                waypoint0 = np.insert(waypoint0, 0, state_dict["position"], axis=0)
                if q[0] == ":":
                  waypoint1 = lanelet_dict[lane_id1]["waypoint"] + lanelet_dict[p]["waypoint"]
                else:
                  waypoint1 = lanelet_dict[q]["waypoint"] + lanelet_dict[lane_id1]["waypoint"] + lanelet_dict[p]["waypoint"]
                waypoint1 = np.array(waypoint1)
                waypoint1 = waypoint1[np.argmin(np.linalg.norm(waypoint1 - np.array(ego_dict["position"]))):]
                # waypoint1 = np.insert(waypoint1, 0, ego_dict["position"], axis=0)
                if waypoint_intersect(waypoint0, waypoint1) == True:
                  obs_dict["veh_relation_conflict"][veh_index] = 1 # CONFLICT
    if (obs_dict["veh_relation_conflict"][veh_index] != 1 and
        obs_dict["veh_relation_peer"][veh_index] != 1 and
        obs_dict["veh_relation_behind"][veh_index] != 1 and
        obs_dict["veh_relation_ahead"][veh_index] != 1 and
        obs_dict["veh_relation_right"][veh_index] != 1 and
        obs_dict["veh_relation_left"][veh_index] != 1 and
        obs_dict["veh_relation_next"][veh_index] != 1 and
        obs_dict["veh_relation_prev"][veh_index] != 1):
      obs_dict["veh_relation_none"][veh_index] = 1
    # vehicle has priority over ego if the vehicle is
    # approaching/in the same intersection and it's inside a lane of higher priority
    # if a vehicle has a speed of less than 0.1, it's considered not the first vehicle at the intersection
    # NOTE(review): the code below actually compares against 0.01, not 0.1 — confirm intended threshold
    if (obs_dict["veh_relation_conflict"][veh_index] == 1 or obs_dict["veh_relation_peer"][veh_index] == 1) and \
       edge_dict[state_dict["edge_id"]]["to_node_id"] == edge_dict[ego_dict["edge_id"]]["to_node_id"]:
      if lanelet_dict[state_dict["lane_id"]]["edge_priority"] > lanelet_dict[ego_dict["lane_id"]]["edge_priority"]:
        obs_dict["has_priority"][veh_index] = 1
      elif lanelet_dict[state_dict["lane_id"]]["edge_priority"] == lanelet_dict[ego_dict["lane_id"]]["edge_priority"]:
        # equal road priority: a straight-going vehicle has priority over a
        # turning ego, and a right-turning vehicle over a left-turning ego
        if state_dict["right_signal"] == 0 and state_dict["left_signal"] == 0 and \
           (ego_dict["right_signal"] != 0 or ego_dict["left_signal"] != 0):
          obs_dict["has_priority"][veh_index] = 1
        if state_dict["right_signal"] != 0 and ego_dict["left_signal"] != 0:
          obs_dict["has_priority"][veh_index] = 1
      # if not the first vehicle approaching the intersection, then no priority
      if obs_dict["dist_to_end_of_lane"][veh_index] > 6 and state_dict["speed"] < 0.01:
        obs_dict["has_priority"][veh_index] = 0
      # change the color of the high-priority vehicle
      if obs_dict["has_priority"][veh_index] == 1:
        env.tc.vehicle.setColor(obs_dict["veh_ids"][veh_index], env.YIELD_COLOR)
    # time to collision (just an estimate)
    # per-axis closing-time estimate in the ego frame; None marks "already overlapping"
    ego_v = np.array([0, obs_dict["ego_speed"]])
    speed = obs_dict["speed"][veh_index]
    angle = obs_dict["relative_heading"][veh_index] + np.pi / 2
    v = np.array([speed * np.cos(angle), speed * np.sin(angle)])
    if obs_dict["veh_relation_ahead"][veh_index] == 1 or \
       obs_dict["veh_relation_behind"][veh_index] == 1 or \
       obs_dict["veh_relation_conflict"][veh_index] == 1 or \
       obs_dict["veh_relation_peer"][veh_index] == 1:
      pos = obs_dict["relative_position"][veh_index]
      t0 = pos[0] / max(abs(ego_v[0] - v[0]), 0.0001) * np.sign(ego_v[0] - v[0])
      if abs(v[0] - ego_v[0]) < 0.0001:
        if abs(pos[0]) < 2.5:
          t0 = None
        else:
          t0 = env.MAX_TTC_CONSIDERED
      t1 = pos[1] / max(abs(ego_v[1] - v[1]), 0.0001) * np.sign(ego_v[1] - v[1])
      if abs(v[1] - ego_v[1]) < 0.0001:
        if abs(pos[1]) < 2.5:
          t1 = None
        else:
          t1 = env.MAX_TTC_CONSIDERED
      if t0 is None and t1 is None:
        # already overlapping on both axes: imminent collision
        ttc = 0
      else:
        if t0 is None:
          t0 = t1
        if t1 is None:
          t1 = t0
        if abs(t1 - t0) < 2.5 or \
           obs_dict["veh_relation_ahead"][veh_index] == 1 or obs_dict["veh_relation_behind"][veh_index] == 1:
          ttc = max(t0, t1)
        else:
          ttc = env.MAX_TTC_CONSIDERED
        if ttc < -1 or ttc > env.MAX_TTC_CONSIDERED:
          ttc = env.MAX_TTC_CONSIDERED
    else:
      ttc = env.MAX_TTC_CONSIDERED
    obs_dict["ttc"][veh_index] = ttc
  # ego has priority only if no observed vehicle has priority over it
  obs_dict["ego_has_priority"] = 1
  for i in range(env.NUM_VEH_CONSIDERED):
    if obs_dict["has_priority"][i] == 1:
      obs_dict["ego_has_priority"] = 0
  obs_dict["ego_priority_changed"] = 1
  if old_obs_dict is not None and old_obs_dict["ego_has_priority"] == obs_dict["ego_has_priority"]:
    obs_dict["ego_priority_changed"] = 0
  pass
  return obs_dict
def intersect(p0, p1, q0, q1):
  """Check whether line segments p0-p1 and q0-q1 strictly cross each other.

  Standard orientation test: the segments intersect iff the endpoints of each
  segment lie on opposite sides of the other. Collinear/touching
  configurations are not reported as intersections.
  """
  def ccw(a, b, c):
    # True when a -> b -> c makes a counterclockwise turn
    return (c[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (c[0] - a[0])
  straddles_q = ccw(p0, q0, q1) != ccw(p1, q0, q1)
  straddles_p = ccw(p0, p1, q0) != ccw(p0, p1, q1)
  return straddles_q and straddles_p
def waypoint_intersect(waypoints0, waypoints1):
  """Return True if any segment of polyline waypoints0 crosses any segment of waypoints1."""
  for a0, a1 in zip(waypoints0, waypoints0[1:]):
    for b0, b1 in zip(waypoints1, waypoints1[1:]):
      if intersect(a0, a1, b0, b1):
        return True
  return False
def internal_lane_id_between_lanes(from_lane_id, to_lane_id, lanelet_dict):
  """Find the lane that connects from_lane_id to to_lane_id.

  Internal (intersection) lane ids start with ':'. Returns the connecting
  lane id, or None when the two lanes are not connected:
  - if from_lane is internal, it is itself the connector when it leads to to_lane;
  - if to_lane is internal, it is the connector when reachable from from_lane;
  - otherwise the connector is a successor of from_lane that is also a
    predecessor of to_lane.
  """
  next_lanes = lanelet_dict[from_lane_id]["next_lane_id_list"]
  if from_lane_id[0] == ':':
    return from_lane_id if to_lane_id in next_lanes else None
  if to_lane_id[0] == ':':
    return to_lane_id if to_lane_id in next_lanes else None
  prev_of_to = lanelet_dict[to_lane_id]["prev_lane_id_list"]
  for candidate in next_lanes:
    if candidate in prev_of_to:
      return candidate
  return None
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,404 | pulinau/sumo_rl_driving | refs/heads/main | /dqn.py | #!python3
__author__ = "Changjian Li"
from include import *
from sumo_cfgs import *
from utils import class_vars
import random
import tensorflow as tf
import numpy as np
from replay_mem import ReplayMemory, ReplayMemoryManager
from action import loosen_correct_actions
import time
import multiprocessing as mp
import queue
from collections import deque
class DQNCfg():
  """Configuration container for a DQNAgent.

  Every constructor argument is stored verbatim as an instance attribute
  of the same name; DQNAgent later copies them onto itself via class_vars.
  Notable fields:
    play            -- whether the agent is training (False) or playing (True)
    traj_end_ratio  -- ratio of states where bootstrap happens in the sample
    _build_model    -- factory callable producing a fresh Keras model
    _select_actions -- optional hand-coded policy; bypasses learning when set
  """
  def __init__(self,
               name,
               play,
               version,
               resume,
               state_size,
               action_size,
               low_target,
               high_target,
               gamma,
               gamma_inc,
               gamma_max,
               epsilon,
               epsilon_dec,
               epsilon_min,
               threshold,
               memory_size,
               traj_end_pred,
               replay_batch_size,
               traj_end_ratio,
               _build_model,
               model_rst_prob_list,
               tf_cfg,
               reshape,
               _select_actions = None):
    # Mirror every argument (except self) onto the instance under the same name.
    params = dict(locals())
    del params['self']
    for attr_name, attr_value in params.items():
      setattr(self, attr_name, attr_value)
def feed_samp(replay_mem, samp_size, traj_end_ratio, samp_q):
  """Producer loop: keep samp_q topped up with replay-memory mini-batches.

  Runs forever in a worker process.  Sleeps while the replay memory is
  still empty; otherwise pushes a fresh sample whenever the queue holds
  fewer than 80 pending batches.  Any exception propagates and kills the
  worker.
  """
  while True:
    if replay_mem.size() == 0:
      # memory not populated yet -- back off instead of busy-polling
      time.sleep(5)
    elif samp_q.qsize() < 80:
      samp_q.put(replay_mem.sample(samp_size, traj_end_ratio))
class DQNAgent:
  """Multi-model DQN agent.

  Keeps one main model/target-model pair plus a list of periodically-reset
  exploration models (one per entry of model_rst_prob_list).  Replay
  sampling happens in separate feed_samp processes that fill sample_q.
  All configuration comes from a DQNCfg instance whose attributes are
  copied onto the agent in __init__.
  """
  def __init__(self, sumo_cfg, dqn_cfg):
    # Copy every DQNCfg attribute (including stored callables such as
    # _build_model and reshape) onto this agent.
    _attrs = class_vars(dqn_cfg)
    for _attr in _attrs:
      setattr(self, _attr, getattr(dqn_cfg, _attr))
    self.sumo_cfg = sumo_cfg
    # A hand-coded policy bypasses all learning machinery.
    if self._select_actions is not None:
      return
    assert self.memory_size >= self.replay_batch_size
    # TF1-style: bind a session configured by the cfg to the Keras backend.
    tf.keras.backend.set_session(tf.Session(config=self.tf_cfg))
    if self.play == True:
      self.epsilon = 0
    else:
      # Replay memory lives in a manager process so feed_samp workers share it.
      manager = ReplayMemoryManager()
      manager.start()
      self.memory = manager.ReplayMemory(self.memory_size, self.name)
      self.sample_q = mp.Queue(maxsize=100)
      self.feed_samp_p_list = [mp.Process(target=feed_samp,
                                          name='feed_samp ' + self.name,
                                          args=(self.memory,
                                                self.replay_batch_size,
                                                self.traj_end_ratio,
                                                self.sample_q))
                               for _ in range(1)]
      [p.start() for p in self.feed_samp_p_list]
    # half trained model to keep exploration steady
    self.model_list = [self._build_model() for _ in range(len(self.model_rst_prob_list))]
    self.target_model_list = [self._build_model() for _ in range(len(self.model_rst_prob_list))]
    self.loss_hist_list = [deque(maxlen=10) for _ in range(len(self.model_rst_prob_list))]
    self.loss_hist = deque(maxlen=10)
    if self.play == True:
      self.model = self._load_model(self.name + ".sav." + self.version)
    elif self.resume == True:
      self.model = self._load_model(self.name + ".sav." + self.version)
      self.target_model = self._load_model(self.name + ".sav." + self.version)
    else:
      self.model = self._build_model()
      self.target_model = self._build_model()
  def __del__(self):
    # Kill the feed_samp worker processes when the agent is garbage-collected.
    if self.play != True:
      [p.terminate() for p in self.feed_samp_p_list]
  def remember(self, traj, prob):
    """Store a trajectory of 7-tuples (obs, action, reward, next_obs,
    next_action, done, important), reshaping observations first; each
    transition is kept with probability given by the callable prob."""
    if self._select_actions is not None or self.play == True:
      return
    traj = [(self.reshape(obs_dict), action, reward, self.reshape(next_obs_dict), next_action, done, important)
            for obs_dict, action, reward, next_obs_dict, next_action, done, important in traj]
    self.memory.add_traj(traj, self.traj_end_pred, prob)
  def select_actions(self, obs_dict, model_index=None):
    """
    Select actions based on Q value.
    :param obs_dict: raw observation, passed through self.reshape
    :param model_index: index into model_list, or None for the main model
    :return: (action_set, sorted_idx) -- the set of actions within
             self.threshold of the best Q value, and all actions sorted by
             descending desirability
    """
    if self._select_actions is not None:
      return self._select_actions(self.reshape(obs_dict))
    if model_index is None:
      model = self.model
    else:
      model = self.model_list[model_index]
    # model output is a list of heads; the last head holds the Q values
    act_values = model.predict(self.reshape(obs_dict))[-1][0]
    sorted_idx = np.argsort(act_values)[::-1]
    if self.play == True:
      print(self.name, act_values)
    action_set = set(np.where(act_values >= np.max(act_values) + self.threshold)[0])
    return (action_set, list(sorted_idx))
  def replay(self):
    """Train every model (exploration models + main model) on one sampled
    batch pulled non-blockingly from sample_q; returns silently if no
    batch is ready.  Also anneals gamma/epsilon and may reset models."""
    if self._select_actions is not None or self.play == True:
      return
    try:
      states, actions, rewards, next_states, next_actions, not_dones, steps = self.sample_q.get(block=False)
    except queue.Empty:
      # print("replay qsize: ", self.sample_q.qsize())
      # print(self.name, " empty")
      return
    for model_index, (model, target_model, loss_hist) in enumerate(zip(self.model_list + [self.model],
                                                                       self.target_model_list + [self.target_model],
                                                                       self.loss_hist_list + [self.loss_hist])):
      rewards = np.array(rewards)
      not_dones = np.array(not_dones)
      targets_f = model.predict_on_batch(states)
      # m = number of objective heads (last head excluded), n = batch size
      m, n = len(targets_f) - 1, len(targets_f[0])
      # NOTE(review): np.int was removed in NumPy >= 1.24 -- this code
      # requires an older NumPy; confirm the pinned version.
      actions = np.array(actions, dtype=np.int) * np.ones(shape=(m, n), dtype=np.int)
      next_actions = np.array(next_actions, dtype=np.int) * np.ones(shape=(m, n), dtype=np.int)
      steps = np.array(steps, dtype=np.int) * np.ones(shape=(m, n), dtype=np.int)
      # SARSA-style backup: Q of the target network at the taken next action
      next_q = target_model.predict_on_batch(next_states)[:-1]
      backup = np.array([[next_q[i][j][next_actions[i][j]] for j in range(n)] for i in range(m)])
      backup = (self.gamma ** (steps + 1)) * not_dones * backup
      # clamp targets larger than zero to zero
      backup[np.where(backup > 0)] = 0
      backup[np.where(backup < self.low_target)] = self.low_target
      targets = (self.gamma ** steps) * rewards + backup
      # clamp incorrect target to zero
      for i in range(m):
        for j in range(n):
          x = targets_f[i][j]
          x[np.where(x > 0)] = 0
          x[np.where(x < self.low_target)] = self.low_target
          x[actions[i][j]] = targets[i][j]
      # print(self.name, targets_f[-1][0])
      loss = model.train_on_batch(states, targets_f)
      if self.name == "safety":
        print("model ", model_index, " ", loss[0])
      loss_hist.append(loss[0])
      ep = 0
      factor = 1.2
      # extra epochs when this batch's loss spikes above the recent median;
      # only applied to the main model (model_index == len(self.model_list))
      max_train_ep = loss[0]/max(np.median(loss_hist), 0.000000000001) - factor
      while loss[0] > factor * np.median(loss_hist) and ep < min(10 * max_train_ep, 10) and model_index == len(self.model_list):
        ep += 1
        targets_f = model.predict_on_batch(states)
        # clamp incorrect target to zero
        for i in range(m):
          for j in range(n):
            x = targets_f[i][j]
            x[np.where(x > 0)] = 0
            x[np.where(x < self.low_target)] = self.low_target
            x[actions[i][j]] = targets[i][j]
        loss = model.train_on_batch(states, targets_f)
        if self.name == "safety":
          print("model ", model_index, " ", self.name, " supplementary training:", np.median(loss_hist), loss[0])
    # randomly re-initialise exploration models, then anneal hyper-parameters
    self.reset_models()
    if self.gamma < self.gamma_max:
      self.gamma += self.gamma_inc
    if self.epsilon > self.epsilon_min:
      self.epsilon -= self.epsilon_dec
  def update_target(self):
    """Copy current weights of every model into its target counterpart."""
    if self._select_actions is not None or self.play == True:
      return
    for model, target_model in zip(self.model_list + [self.model],
                                   self.target_model_list + [self.target_model]):
      target_model.set_weights(model.get_weights())
  def reset_models(self):
    """
    Reset exploration models: each (model, target) pair in model_list is
    re-initialised with probability model_rst_prob_list[i].
    :return: None
    """
    for i in range(len(self.model_list)):
      if random.random() < self.model_rst_prob_list[i]:
        self.model_list[i] = self._build_model()
        self.target_model_list[i] = self._build_model()
  def _load_model(self, filename):
    # custom_objects re-binds names captured by lambda layers inside the saved model
    return tf.keras.models.load_model(filename, custom_objects={"tf": tf, "NUM_VEH_CONSIDERED": self.sumo_cfg.NUM_VEH_CONSIDERED})
  def save_model(self, suffix='current'):
    """Persist the main model as '<name>.sav.<suffix>'; no-op while playing."""
    if self._select_actions is not None or self.play == True:
      return
    name = self.name + ".sav." + suffix
    self.model.save(name)
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,405 | pulinau/sumo_rl_driving | refs/heads/main | /replay_mem.py | #!python3
__author__ = "Changjian Li"
import random
import time
import multiprocessing as mp
from multiprocessing.managers import BaseManager
from copy import deepcopy
class ReplayMemory():
  """
  Replay memory for dynamic n-step look-ahead. Since the problem domain has very sparse reward,
  instead of using a fixed look-ahead time step, bootstrap is only done when the reward is actually received.
  Insignificant reward in between can be ignored by supplying a predicate, which is treated as 0 reward.

  Storage layout: transitions are kept as parallel lists; states/rewards/
  not_dones are lists-of-lists (one inner list per objective head), while
  actions/next_actions/steps are flat.  "end_*" lists hold only the last
  transition of each trajectory segment so that bootstrap states can be
  over-sampled.  A multiprocessing lock guards all mutation, since the
  instance is served through a manager process.
  """
  def __init__(self, max_len, name):
    """
    :param max_len: max replay memory size
    :param name: label used by the owning agent (not used internally)
    """
    assert max_len > 0, "max_len must be greater than zero"
    self.max_len = max_len
    self.lock = mp.Lock()
    self.lock.acquire()
    self.states = []
    self.actions = []
    self.rewards = []
    self.next_states = []
    self.next_actions =[]
    self.not_dones = []
    self.steps = []
    self.end_states = []
    self.end_actions = []
    self.end_rewards = []
    self.end_next_states = []
    self.end_next_actions = []
    self.end_not_dones = []
    self.end_steps = []
    self._size = 0
    self.lock.release()
    self.name = name
    # running average length of a trajectory segment; used to size end-sampling
    self.avg_traj_seg_len = 0
  def add_traj(self, traj, end_pred, prob):
    """
    Store a trajectory, splitting it into segments at every "significant"
    reward (and at the final transition).  Each transition in a segment
    shares the segment-end reward/next_state and records its distance
    (step) from the segment end.
    :param traj: list of state transitions
           (state, action, reward, next_state, next_action, done, important)
    :param end_pred: decide whether the reward is significant enough to be considered
    :param prob: callable giving the keep-probability for a transition;
           transitions flagged important are always kept
    :return:
    """
    self.lock.acquire()
    traj_seg = []
    # walk the trajectory backwards so each transition sees its segment end first
    for i, (state, action, reward, next_state, next_action, done, important) in enumerate(traj[::-1]):
      if i == 0 or end_pred(reward):
        end_reward, end_state, end_next_action, end_done = deepcopy(reward), deepcopy(next_state), deepcopy(next_action), deepcopy(done)
        #if not done and i == 0:
        #  end_done = True
        if len(traj_seg) != 0:
          # fold the finished segment's length into the running average
          self.avg_traj_seg_len = (len(self.end_actions) * self.avg_traj_seg_len + len(traj_seg)) / \
                                  (len(self.end_actions) + 1)
          for j, (x, important) in enumerate(traj_seg):
            if important or random.uniform(0, 1) < prob(i-(len(traj_seg)-j)):
              self._add(x, j == len(traj_seg)-1)
        traj_seg = []
        step = 0
      traj_seg.append(deepcopy(((state, action, end_reward, end_state, end_next_action, end_done, step), important)))
      step += 1
    # flush the final (oldest) segment
    self.avg_traj_seg_len = (len(self.end_actions) * self.avg_traj_seg_len + len(traj_seg)) / \
                            (len(self.end_actions) + 1)
    for j, (x, important) in enumerate(traj_seg):
      if important or random.uniform(0, 1) < prob(i-(len(traj_seg)-j)):
        self._add(x, j == len(traj_seg)-1)
    self.lock.release()
  def _add(self, tran, is_end):
    """Append one transition to the parallel lists; when is_end is True it
    is additionally appended to the end_* lists.  Caller holds the lock."""
    state, action, reward, next_state, next_action, done, step = deepcopy(tran)
    # done is a list of per-objective lists; invert element-wise
    not_done = [[not y for y in x] for x in done]
    """
    try:
      assert len(self.actions) == len(self.next_actions) and \
             len(self.actions) == len(self.steps) and \
             (len(self.actions) == 0 or len(self.actions) == len(self.states[0])) and \
             (len(self.actions) == 0 or len(self.actions) == len(self.next_states[0])) and \
             (len(self.actions) == 0 or len(self.actions) == len(self.not_dones[0])) and \
             (len(self.actions) == 0 or len(self.actions) == len(self.rewards[0])), "must be of equal length"
    except:
      print('actions: ', self.actions)
      print('next_actions: ', self.next_actions)
      print('rewards: ', self.rewards)
      print('not_dones: ', self.not_dones)
      print('steps: ', self.steps)
      print('states: ', len(self.states[0]))
      print('next_states: ', len(self.next_states[0]))
      raise
    try:
      assert len(self.end_actions) == len(self.end_next_actions) and \
             len(self.end_actions) == len(self.end_steps) and \
             (len(self.end_actions) == 0 or len(self.end_actions) == len(self.end_states[0])) and \
             (len(self.end_actions) == 0 or len(self.end_actions) == len(self.end_next_states[0])) and \
             (len(self.end_actions) == 0 or len(self.end_actions) == len(self.end_not_dones[0])) and \
             (len(self.end_actions) == 0 or len(self.end_actions) == len(self.end_rewards[0])), "must be of equal length"
    except:
      print('end_actions: ', self.end_actions)
      print('end_next_actions: ', self.end_next_actions)
      print('end_rewards: ', self.end_rewards)
      print('end_not_dones: ', self.end_not_dones)
      print('end_steps: ', self.end_steps)
      print('end_states: ', len(self.end_states[0]))
      print('end_next_states: ', len(self.end_next_states[0]))
      raise
    """
    # trim in bulk (drop the oldest max_len entries) once well past capacity
    if self._size > 2 * self.max_len + 2:
      for i in range(len(self.states)):
        self.states[i] = self.states[i][self.max_len:]
        self.next_states[i] = self.next_states[i][self.max_len:]
      for i in range(len(self.rewards)):
        self.rewards[i] = self.rewards[i][self.max_len:]
      for i in range(len(self.not_dones)):
        self.not_dones[i] = self.not_dones[i][self.max_len:]
      self.actions = self.actions[self.max_len:]
      self.next_actions = self.next_actions[self.max_len:]
      self.steps = self.steps[self.max_len:]
      self._size = len(self.actions)
    self.actions += [action]
    self.next_actions += [next_action]
    self.steps += [step]
    if self._size == 0:
      # first transition establishes one inner list per objective head
      for i in range(len(state)):
        self.states += [state[i]]
        self.next_states += [next_state[i]]
      for i in range(len(reward)):
        self.rewards += [reward[i]]
      for i in range(len(not_done)):
        self.not_dones += [not_done[i]]
    else:
      for i in range(len(state)):
        self.states[i] += state[i]
        self.next_states[i] += next_state[i]
      for i in range(len(reward)):
        self.rewards[i] += reward[i]
      for i in range(len(not_done)):
        self.not_dones[i] += not_done[i]
    self._size += 1
    if is_end:
      # avoid using the same copy
      state, action, reward, next_state, next_action, done, step = deepcopy(tran)
      not_done = [[not y for y in x] for x in done]
      # cap the end-list proportionally to the average segment length
      cap = int(self.max_len/(self.avg_traj_seg_len+1)) + 2
      if len(self.end_actions) > 2 * cap:
        for i in range(len(self.end_states)):
          self.end_states[i] = self.end_states[i][cap:]
          self.end_next_states[i] = self.end_next_states[i][cap:]
        for i in range(len(self.end_rewards)):
          self.end_rewards[i] = self.end_rewards[i][cap:]
        for i in range(len(self.end_not_dones)):
          self.end_not_dones[i] = self.end_not_dones[i][cap:]
        self.end_actions = self.end_actions[cap:]
        self.end_next_actions = self.end_next_actions[cap:]
        self.end_steps = self.end_steps[cap:]
      self.end_actions += [action]
      self.end_next_actions += [next_action]
      self.end_steps += [step]
      if len(self.end_states) == 0:
        for i in range(len(state)):
          self.end_states += [state[i]]
          self.end_next_states += [next_state[i]]
        for i in range(len(reward)):
          self.end_rewards += [reward[i]]
        for i in range(len(not_done)):
          self.end_not_dones += [not_done[i]]
      else:
        for i in range(len(state)):
          self.end_states[i] += state[i]
          self.end_next_states[i] += next_state[i]
        for i in range(len(reward)):
          self.end_rewards[i] += reward[i]
        for i in range(len(not_done)):
          self.end_not_dones[i] += not_done[i]
  def _sample_end(self, n):
    """Uniformly sample up to n segment-end transitions (deep-copied)."""
    self.lock.acquire()
    assert n > 0, "sample size must be positive"
    assert len(self.end_actions) > 0, "replay memory empty"
    indices = random.sample(range(len(self.end_actions)), min(n, len(self.end_actions)))
    actions = [self.end_actions[i] for i in indices]
    next_actions = [self.end_next_actions[i] for i in indices]
    steps = [self.end_steps[i] for i in indices]
    rewards = [[x[i] for i in indices]
               for x in self.end_rewards]
    not_dones = [[x[i] for i in indices]
                 for x in self.end_not_dones]
    states = [[x[i] for i in indices]
              for x in self.end_states]
    next_states = [[x[i] for i in indices]
                   for x in self.end_next_states]
    samp = deepcopy((states, actions, rewards, next_states, next_actions, not_dones, steps))
    self.lock.release()
    return samp
  def _sample_traj(self, n):
    """Uniformly sample up to n transitions from the full memory (deep-copied)."""
    self.lock.acquire()
    assert n > 0, "sample size must be positive"
    assert self._size > 0, "replay memory empty"
    indices = random.sample(range(self._size), min(n, self._size))
    actions = [self.actions[i] for i in indices]
    next_actions = [self.next_actions[i] for i in indices]
    steps = [self.steps[i] for i in indices]
    rewards = [[x[i] for i in indices]
               for x in self.rewards]
    not_dones = [[x[i] for i in indices]
                 for x in self.not_dones]
    states = [[x[i] for i in indices]
              for x in self.states]
    next_states = [[x[i] for i in indices]
                   for x in self.next_states]
    samp = deepcopy((states, actions, rewards, next_states, next_actions, not_dones, steps))
    self.lock.release()
    return samp
  def sample(self, n, traj_end_ratio):
    """Draw a mixed batch of roughly n transitions in which segment-end
    (bootstrap) transitions appear with ratio traj_end_ratio."""
    assert traj_end_ratio < 1 and traj_end_ratio > 0, "traj_end_ratio must lie between 0 and 1"
    # alpha converts the desired end-transition ratio into a sampling split,
    # correcting for the average segment length
    alpha = self.avg_traj_seg_len / (1/traj_end_ratio -1 + self.avg_traj_seg_len)
    assert alpha > 0 and alpha < 1, "alpha must be between 0 and 1"
    end_states, end_actions, end_rewards, \
    end_next_states, end_next_actions, end_not_dones, end_steps = self._sample_end(max(int(alpha*n), 1))
    states, actions, rewards, \
    next_states, next_actions, not_dones, steps = self._sample_traj(max(int((1-alpha)*n), 1))
    # concatenate the two sub-samples component-wise
    actions += end_actions
    next_actions += end_next_actions
    steps += end_steps
    for i in range(len(states)):
      states[i] += end_states[i]
      next_states[i] += end_next_states[i]
    for i in range(len(rewards)):
      rewards[i] += end_rewards[i]
    for i in range(len(not_dones)):
      not_dones[i] += end_not_dones[i]
    return (states, actions, rewards, next_states, next_actions, not_dones, steps)
  def size(self):
    # number of stored transitions (excluding the separate end_* lists)
    return self._size
class ReplayMemoryManager(BaseManager):
  # Custom multiprocessing manager; hosts ReplayMemory instances in a
  # separate server process so feed_samp workers can share one memory.
  pass
# Expose ReplayMemory through the manager as manager.ReplayMemory(...)
ReplayMemoryManager.register('ReplayMemory', ReplayMemory)
54,406 | pulinau/sumo_rl_driving | refs/heads/main | /load_pretrain.py | #!python3
__author__ = "Changjian Li"
import numpy as np
from replay_mem import ReplayMemory
from dqn import *
from dqn_cfgs import *
from sumo_cfgs import sumo_cfg
def load_pretrain():
  """Seed a safety DQNAgent's replay memory from pre-recorded examples.

  Reads the first array stored in examples.npz, which apparently holds
  trajectories of (obs_dict, action, next_obs_dict) triples -- TODO
  confirm the archive layout.
  """
  agt = DQNAgent(sumo_cfg, cfg_safety)
  with open("examples.npz", "rb") as file:
    npzfile = np.load(file)
    mem = npzfile[npzfile.files[0]]
    # drop the archive reference once the array is extracted
    npzfile = None
  for traj in mem:
    for obs_dict, action, next_obs_dict in traj:
      # NOTE(review): DQNAgent.remember is declared as remember(self, traj, prob)
      # and unpacks 7-tuples (obs, action, reward, next_obs, next_action, done,
      # important); this call passes a list of one 5-tuple and omits prob, so it
      # raises TypeError at runtime -- confirm the intended data layout and fix.
      agt.remember([(obs_dict, action, 0, next_obs_dict, True)])
  # NOTE(review): ReplayMemory defines no attribute `traj_mem`; this print
  # would raise AttributeError -- perhaps agt.memory.size() was intended.
  print(agt.memory.traj_mem)
# NOTE(review): runs at import time; consider an `if __name__ == "__main__":` guard.
load_pretrain()
54,407 | pulinau/sumo_rl_driving | refs/heads/main | /github-release/utils.py | #!python3
__author__ = "Changjian Li"
import inspect
def class_vars(obj):
  """Return a {name: value} dict of obj's non-dunder attributes.

  The original filter included `not callable(k)`, but `k` is the attribute
  *name* (a string), which is never callable, so the conjunct was dead code
  and is removed here without changing behavior.  Callable attribute
  *values* (bound methods, stored function objects) are deliberately kept:
  callers such as DQNAgent rely on copying stored callables like
  `_build_model` through this helper, so do NOT "fix" this to filter on
  `callable(v)`.
  """
  return {name: value for name, value in inspect.getmembers(obj)
          if not name.startswith('__')}
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,408 | pulinau/sumo_rl_driving | refs/heads/main | /sumo_gym.py | #!python3
__author__ = "Changjian Li"
import random
random.seed()
from action import get_action_space, disable_collision_check, enable_collision_check, act, infer_action
from observation import get_observation_space, get_veh_dict, get_obs_dict, get_lanelet_dict, get_edge_dict
from reward import get_reward_list
from utils import class_vars
from collections import deque
from copy import deepcopy
from include import *
class SumoCfg():
  """Configuration container for the SUMO environment.

  Every constructor argument is stored verbatim as an instance attribute
  of the same name; SumoGymEnv later copies them onto itself via class_vars.
  Groups of fields: simulator launch (SUMO_CMD...), vehicle dynamics limits
  (MAX_VEH_*), observation sizing (NUM_*_CONSIDERED, OBSERVATION_RADIUS),
  reward comfort thresholds, and rendering colors.
  """
  def __init__(self,
               # sumo
               SUMO_CMD,
               SUMO_TIME_STEP,
               NET_XML_FILE,
               ROU_XML_FILE_LIST,
               EGO_VEH_ID,
               MAX_VEH_ACCEL,
               MAX_VEH_DECEL,
               MAX_VEH_SPEED,
               # observation
               NUM_LANE_CONSIDERED,
               NUM_VEH_CONSIDERED,
               MAX_TTC_CONSIDERED,
               OBSERVATION_RADIUS,
               # reward
               MAX_COMFORT_ACCEL_LEVEL,
               MAX_COMFORT_DECEL_LEVEL,
               # color
               DEFAULT_COLOR,
               YIELD_COLOR):
    # Mirror every argument (except self) onto the instance under the same name.
    params = dict(locals())
    del params['self']
    for attr_name, attr_value in params.items():
      setattr(self, attr_name, attr_value)
class SumoGymEnv(gym.Env):
  """Gym environment wrapping a SUMO traffic simulation.

  Copies all SumoCfg attributes onto the instance, starts a labelled
  SUMO/TraCI connection, and keeps 2-deep histories of vehicle states,
  observations, and actions.  Subclasses (MultiObjSumoEnv) override step().
  """
  def __init__(self, config):
    _attrs = class_vars(config)
    for _attr in _attrs:
      setattr(self, _attr, getattr(config, _attr))
    self.action_space = get_action_space()
    # NOTE(review): attribute name is misspelled ("obsevation_space");
    # renaming would break any external readers, so it is only flagged here.
    self.obsevation_space = get_observation_space(self)
    self.lanelet_dict = get_lanelet_dict(self.NET_XML_FILE)
    self.edge_dict = get_edge_dict(self.NET_XML_FILE)
    self.env_state = EnvState.NOT_STARTED
    self._agt_ctrl = False # whether the ego car is controlled by RL agent
    self.veh_dict_hist = deque(maxlen=2)
    self.obs_dict_hist = deque(maxlen=2)
    self.action_dict_hist = deque(maxlen=2)
    # random label so several simulations can run in one process
    self.sim_label = "sim" + str(random.randint(0, 65536))
    # pick one route file at random for this episode
    ROU_XML_FILE = random.sample(self.ROU_XML_FILE_LIST, 1)
    traci.start(self.SUMO_CMD + ROU_XML_FILE, label=self.sim_label)
    self.tc = traci.getConnection(self.sim_label)
  @property
  def agt_ctrl(self):
    # True when the RL agent (not SUMO's driver model) controls the ego car
    return self._agt_ctrl
  @agt_ctrl.setter
  def agt_ctrl(self, value):
    # switching control also toggles SUMO's collision checking for the ego car
    if value == True:
      disable_collision_check(self, self.EGO_VEH_ID)
      self._agt_ctrl = value
    elif value == False:
      enable_collision_check(self, self.EGO_VEH_ID)
      self._agt_ctrl = value
    else:
      raise ValueError("SumoGymEnv.agt_ctrl must be either True or False")
  def step(self, action):
    # placeholder implementation; real behavior lives in MultiObjSumoEnv.step
    obs = 0
    done = 0
    reward = 0
    info = None
    return obs, reward, done, info
  def reset(self, init_step):
    """Reload the simulation and advance it init_step steps under SUMO's
    own control; retries with a shorter warm-up if the episode ends early.
    Returns the initial observation dict."""
    self.action_dict_hist.clear()
    self.veh_dict_hist.clear()
    self.obs_dict_hist.clear()
    try:
      ROU_XML_FILE = random.sample(self.ROU_XML_FILE_LIST, 1)
      self.tc.load(self.SUMO_CMD[1:] + ROU_XML_FILE)
      # 1st time step starts the simulation,
      # 2nd makes sure that all initial vehicles (departure time < SUMO_TIME_STEP) are in scene
      self.agt_ctrl = False
      self.tc.simulationStep()
      self.tc.simulationStep()
      self.veh_dict_hist.append(get_veh_dict(self))
      self.obs_dict_hist.append(get_obs_dict(self))
      self.env_state = EnvState.NORMAL
      for i in range(init_step):
        if self.env_state == EnvState.NORMAL:
          # NOTE(review): called with no action argument -- only valid for
          # subclasses whose step() has a default (MultiObjSumoEnv); confirm
          # reset() is never used on SumoGymEnv directly.
          self.step()
      if self.env_state != EnvState.NORMAL:
        # warm-up ended the episode; retry with one fewer warm-up step
        return self.reset(i-1)
      self.agt_ctrl = True
      return get_obs_dict(self)
    except (traci.FatalTraCIError, traci.TraCIException):
      self.env_state = EnvState.ERROR
      raise
  def close(self):
    """Close the TraCI connection; marks the env ERROR on failure."""
    try:
      self.tc.close()
    except (traci.FatalTraCIError, traci.TraCIException):
      self.env_state = EnvState.ERROR
      raise
class MultiObjSumoEnv(SumoGymEnv):
  """SumoGymEnv variant returning per-objective reward lists from reward.py."""
  def step(self, action_dict=None):
    """Apply action_dict to the ego vehicle and advance one simulation step.

    :param action_dict: ego action; None lets SUMO's own driver act (the
           taken action is then inferred and reported in info)
    :return: (obs_dict, reward_list, env_state, info) where reward_list is
             the (rewards, dones, violations) triple from get_reward_list
    """
    assert self.env_state == EnvState.NORMAL, "env.env_state is not EnvState.NORMAL"
    try:
      self.env_state = act(self, self.EGO_VEH_ID, action_dict)
      # if ego reaches the end of an incorrect (turning) lane, simulation is considered as DONE
      if self.env_state == EnvState.NORMAL and \
         self.obs_dict_hist[-1]["ego_dist_to_end_of_lane"] < 0.01 and \
         self.obs_dict_hist[-1]["ego_correct_lane_gap"] != 0:
        self.env_state = EnvState.DONE
      if self.env_state == EnvState.DONE:
        # episode over: freeze on (a copy of) the last recorded state
        obs_dict = deepcopy(self.obs_dict_hist[-1])
        veh_dict = deepcopy(self.veh_dict_hist[-1])
      else:
        obs_dict = get_obs_dict(self)
        veh_dict = get_veh_dict(self)
      self.veh_dict_hist.append(veh_dict)
      self.obs_dict_hist.append(obs_dict)
      if self.agt_ctrl == False:
        # SUMO drove the ego car: recover what action it effectively took
        action_dict = infer_action(self)
      self.action_dict_hist.append(action_dict)
      info = action_dict
      """
      print(self.obs_dict_hist[-1]["veh_ids"])
      print("peer", self.obs_dict_hist[-1]["veh_relation_peer"])
      print("conflict", self.obs_dict_hist[-1]["veh_relation_conflict"])
      print("ahead", self.obs_dict_hist[-1]["veh_relation_ahead"])
      print("next", self.obs_dict_hist[-1]["veh_relation_next"])
      print("in_intersection", self.obs_dict_hist[-1]["in_intersection"])
      print("has_priority", self.obs_dict_hist[-1]["has_priority"])
      print("ego_has_priority", self.obs_dict_hist[-1]["ego_has_priority"])
      print(self.obs_dict_hist[-1]["ttc"])
      print(get_reward_list(self)[0][1])
      """
      return obs_dict, get_reward_list(self), self.env_state, info
    except (traci.FatalTraCIError, traci.TraCIException):
      self.env_state = EnvState.ERROR
      raise
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,409 | pulinau/sumo_rl_driving | refs/heads/main | /reward.py | #!/bin/python3
__author__ = "Changjian Li"
from include import *
import numpy as np
def get_reward_list(env):
  """Aggregate per-objective rewards for the current env step.

  Returns (rewards, dones, violations).  The validity and speed/comfort
  objectives are placeholders and always None; only safety and regulation
  are computed.
  """
  r_safety, d_safety, violation_safety = get_reward_safety(env)
  r_regulation, d_regulation, violation_yield, violation_turn = get_reward_regulation(env)
  rewards = [None, r_safety, r_regulation, None]
  dones = [None, d_safety, d_regulation, None]
  violations = [violation_safety, violation_yield, violation_turn]
  return (rewards, dones, violations)
def get_reward_safety(env):
  """Per-vehicle safety reward: -1 for each considered vehicle the ego is
  getting dangerously closer to (or has collided with), 0 otherwise.

  Returns (rewards, dones, violated) where rewards/dones are lists of
  one-element lists (one per considered vehicle slot) and violated flags
  an actual crash this step.
  """
  rewards = []
  dones = []
  obs_dict = env.obs_dict_hist[-1]
  old_obs_dict = None
  if len(env.obs_dict_hist) > 1:
    old_obs_dict = env.obs_dict_hist[-2]
  action_dict = env.action_dict_hist[-1]
  violated = False
  if env.env_state == EnvState.CRASH:
    violated = True
  for i, c in enumerate(obs_dict["collision"]):
    r = 0
    d = False
    # Penalise slot i when: the vehicle was already tracked (not new), is in a
    # relevant relation (ahead, or conflicting/peer while both are inside the
    # intersection), and the ego is closing in on it (ttc or distance strictly
    # decreasing) without braking hard -- OR it actually collided -- OR a lane
    # change was made at very low ttc.
    # NOTE(review): `obs_dict["veh_relation_none"] != 1` compares the whole
    # list to 1 (always True); the per-vehicle form `[i]` was probably
    # intended -- confirm against observation.py.
    if (old_obs_dict is not None and
        obs_dict["is_new"][i] == 0 and
        obs_dict["veh_relation_none"] != 1 and
        (obs_dict["veh_relation_ahead"][i] == 1 or
         ((obs_dict["veh_relation_conflict"][i] == 1 or obs_dict["veh_relation_peer"][i] == 1) and
          obs_dict["in_intersection"][i] == 1 and
          obs_dict["ego_in_intersection"] == 1)
         ) and
        ((abs(old_obs_dict["ttc"][i]) > abs(obs_dict["ttc"][i]) + 1e-6 and
          (np.linalg.norm(old_obs_dict["relative_position"][i]) < 8 or old_obs_dict["ttc"][i] < 4) and
          (action_dict["accel_level"] != ActionAccel.MAXDECEL or obs_dict["veh_relation_behind"][i] == 1)) or
         (np.linalg.norm(old_obs_dict["relative_position"][i]) > np.linalg.norm(obs_dict["relative_position"][i]) + 1e-6 and
          np.linalg.norm(old_obs_dict["relative_position"][i]) < 7 and
          (action_dict["accel_level"] != ActionAccel.MAXDECEL or obs_dict["veh_relation_ahead"][i] != 1)
          ))
        ) or (env.env_state == EnvState.CRASH and c == 1
        ) or (action_dict["lane_change"] != ActionLaneChange.NOOP and (obs_dict["ttc"][i] < 1)
        ):
      print(obs_dict["veh_ids"][i], "old_ttc", old_obs_dict["ttc"][i], "ttc", obs_dict["ttc"][i],
            "pos", np.linalg.norm(old_obs_dict["relative_position"][i]), "action", action_dict,
            "collision", c)
      r = -1
    # a slot's sub-trajectory ends when the vehicle is new or was penalised
    if obs_dict["is_new"][i] == 1 or r == -1:
      d = True
    rewards += [[r]]
    dones += [[d]]
  return (rewards, dones, violated)
def get_reward_regulation(env):
  """Traffic-regulation reward: penalties for approaching the end of a
  wrong (turning) lane, for failing to slow when yielding is required,
  and a small penalty for slowing unnecessarily when having priority.

  Returns ([[r]], [[done]], violated_yield, violated_turn).
  """
  r = 0
  done = False
  violated_turn = False
  violated_yield = False
  obs_dict = env.obs_dict_hist[-1]
  old_obs_dict = None
  if len(env.obs_dict_hist) > 1:
    old_obs_dict = env.obs_dict_hist[-2]
  action_dict = env.action_dict_hist[-1]
  if obs_dict["ego_dist_to_end_of_lane"] < 100:
    if obs_dict["ego_correct_lane_gap"] != 0:
      # on a wrong lane: sigmoid penalty ramping from 0 to -1 over the last ~60m
      r = 1/(1 + np.exp(-0.1*(obs_dict["ego_dist_to_end_of_lane"]-60))) - 1
  # tte = time-to-end-of-lane estimate (distance / speed)
  old_tte = None
  if old_obs_dict is not None:
    old_tte = old_obs_dict["ego_dist_to_end_of_lane"] / (old_obs_dict["ego_speed"] + 1e-6)
  tte = obs_dict["ego_dist_to_end_of_lane"] / (obs_dict["ego_speed"] + 1e-6)
  # must yield (no priority, outside intersection) but still approaching
  # the lane end without braking hard: hard penalty and episode done
  if old_tte is not None and \
     (old_tte < 4 or old_obs_dict["ego_dist_to_end_of_lane"] < 1) and \
     obs_dict["ego_has_priority"] != 1 and \
     obs_dict["ego_in_intersection"] != 1 and \
     old_tte > tte + 1e-6 and \
     action_dict["accel_level"] != ActionAccel.MAXDECEL:
    print("regulation: old_tte", old_tte, " tte", tte)
    done = True
    r = -1
  # has priority but is slowing down needlessly: mild penalty
  if old_tte is not None and \
     obs_dict["ego_has_priority"] == 1 and \
     obs_dict["ego_in_intersection"] != 1 and \
     old_tte < tte - 1e-6:
    r = -0.02
  # about to run off the end of a wrong lane
  if (tte < 0.15 and obs_dict["ego_correct_lane_gap"] != 0):
    violated_turn = True
  # entered the intersection while yielding was required last step
  if (old_obs_dict is not None and old_obs_dict["ego_has_priority"] != 1 and
      old_obs_dict["ego_in_intersection"] != 1 and obs_dict["ego_in_intersection"] == 1):
    violated_yield = True
  # priority or edge changes end the regulation sub-trajectory
  if obs_dict["ego_priority_changed"] == 1 or obs_dict["ego_edge_changed"] == 1:
    done = True
  return ([[r]], [[done]], violated_yield, violated_turn)
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,410 | pulinau/sumo_rl_driving | refs/heads/main | /github-release/observation.py | #!python3
__author__ = "Changjian Li"
import heapq
from math import pi
from include import *
def get_observation_space(env):
  """Assemble the gym Dict observation space: ego-vehicle scalars plus
  fixed-size per-neighbour feature arrays (NUM_VEH_CONSIDERED slots)."""
  n_veh = env.NUM_VEH_CONSIDERED
  radius = env.OBSERVATION_RADIUS
  fields = {}
  # ego-vehicle scalars
  fields["ego_speed"] = spaces.Box(0, env.MAX_VEH_SPEED, shape=(1,), dtype=np.float32)
  fields["ego_dist_to_end_of_lane"] = spaces.Box(0, radius, shape=(1,), dtype=np.float32)
  fields["ego_in_intersection"] = spaces.Discrete(2)
  fields["ego_exists_left_lane"] = spaces.Discrete(2)
  fields["ego_exists_right_lane"] = spaces.Discrete(2)
  fields["ego_correct_lane_gap"] = spaces.Box(-env.NUM_LANE_CONSIDERED, env.NUM_LANE_CONSIDERED, shape=(1,), dtype=np.int16)
  # per-neighbour features
  fields["exists_vehicle"] = spaces.MultiBinary(n_veh)
  fields["speed"] = spaces.Box(0, env.MAX_VEH_SPEED, (n_veh,))  # absolute speed
  fields["dist_to_end_of_lane"] = spaces.Box(0, radius, (n_veh,), dtype=np.float32)
  fields["in_intersection"] = spaces.MultiBinary(n_veh)
  fields["relative_position"] = spaces.Box(-radius, radius, (n_veh, 2), dtype=np.float32)
  fields["relative_heading"] = spaces.Box(-pi, pi, (n_veh,), dtype=np.float32)
  fields["has_priority"] = spaces.MultiBinary(n_veh)
  # one binary flag vector per ego/neighbour topological relation
  for relation in ("peer", "conflict", "next", "prev", "left", "right", "ahead", "behind"):
    fields["veh_relation_" + relation] = spaces.MultiBinary(n_veh)
  return spaces.Dict(fields)
def get_veh_dict(env):
  """Snapshot the state of every vehicle currently in the simulation.

  Returns a dict keyed by vehicle id; each value holds the TraCI-reported
  kinematics (position, angle in degrees with north = 0 and clockwise
  positive, speed), geometry, lane/edge location, route, and the id of the
  next normal edge on the route (None when the route ends).
  """
  vehicle = env.tc.vehicle
  veh_dict = {}
  for veh_id in vehicle.getIDList():
    lane_id = vehicle.getLaneID(veh_id)
    route = vehicle.getRoute(veh_id)  # edge-id list of the vehicle's route
    route_idx = vehicle.getRouteIndex(veh_id)
    info = {
      "type": "vehicle",
      "position": vehicle.getPosition(veh_id),
      "angle": vehicle.getAngle(veh_id),  # degrees; north is zero, clockwise
      "speed": vehicle.getSpeed(veh_id),
      "dimension": (vehicle.getLength(veh_id), vehicle.getWidth(veh_id)),
      "edge_id": vehicle.getRoadID(veh_id),
      "lane_id": lane_id,
      "lane_index": vehicle.getLaneIndex(veh_id),
      "lane_length": env.tc.lane.getLength(lane_id),
      "lane_position": vehicle.getLanePosition(veh_id),  # position along the lane
      "route": route,
    }
    if route_idx + 1 < len(route):
      info["next_normal_edge_id"] = route[route_idx + 1]
    else:
      info["next_normal_edge_id"] = None
    veh_dict[veh_id] = info
  return veh_dict
def get_lanelet_dict(sumo_net_xml_file):
  """Build a per-lane connectivity/geometry table from a SUMO .net.xml file.

  For every lane (internal intersection lanes included) the returned dict maps
  lane id -> waypoints, owning edge/nodes, lane index, edge priority, and the
  next/previous lane id lists, both including ("next_lane_id_list") and
  excluding ("next_normal_lane_id_list") internal lanes.
  """
  net = sumolib.net.readNet(sumo_net_xml_file, withInternal=True)
  lanelet_dict = {}
  edges = net.getEdges()
  # First pass: create every entry with empty predecessor lists, so the
  # second pass can append to any lane's lists regardless of edge order.
  for edge in edges:
    # add "next" and "previous" connection
    # since there's no direct sumolib API to get previous lane, we need to do some tricks here
    for lane in edge.getLanes():
      lane_id = lane.getID()
      lanelet_dict[lane_id] = {}
      lanelet_dict[lane_id]["prev_lane_id_list"] = []
      lanelet_dict[lane_id]["prev_normal_lane_id_list"] = []
  for edge in edges:
    for lane in edge.getLanes():
      lane_id = lane.getID()
      lane_index = lane.getIndex()
      lanelet_dict[lane_id]["waypoint"] = lane.getShape()
      lanelet_dict[lane_id]["from_node_id"] = edge.getFromNode().getID()
      lanelet_dict[lane_id]["to_node_id"] = edge.getToNode().getID()
      lanelet_dict[lane_id]["edge_id"] = edge.getID()
      lanelet_dict[lane_id]["lane_index"] = lane_index
      # internal lanes (ids starting with ':') always get top priority
      if lane_id[0] == ':':
        lanelet_dict[lane_id]["edge_priority"] = float("inf")
      else:
        lanelet_dict[lane_id]["edge_priority"] = edge.getPriority()
      lanelet_dict[lane_id]["next_normal_lane_id_list"] = [conn.getToLane().getID() for conn in lane.getOutgoing()]
      if lane_id[0] == ':':
        lanelet_dict[lane_id]["next_lane_id_list"] = [conn.getToLane().getID() for conn in lane.getOutgoing()]
      else:
        # for a normal lane the immediate successor is the internal (via) lane
        lanelet_dict[lane_id]["next_lane_id_list"] = [conn.getViaLaneID() for conn in lane.getOutgoing()]
      # NOTE(review): this pass appends to prev_normal_lane_id_list and the
      # withInternal=False pass below appends again for non-internal lanes,
      # so entries can end up duplicated — confirm whether callers rely on
      # membership only (get_obs_dict does `in` tests, so duplicates are benign).
      for next_lane_id in lanelet_dict[lane_id]["next_normal_lane_id_list"] + lanelet_dict[lane_id]["next_lane_id_list"]:
        lanelet_dict[next_lane_id]["prev_normal_lane_id_list"] += [lane_id]
      for next_lane_id in lanelet_dict[lane_id]["next_lane_id_list"]:
        lanelet_dict[next_lane_id]["prev_lane_id_list"] += [lane_id]
      # left/right neighbours within the same edge (higher index = further left)
      if lane_index == len(edge.getLanes()) - 1:
        lanelet_dict[lane_id]["left_lane_id"] = None
      else:
        lanelet_dict[lane_id]["left_lane_id"] = edge.getLanes()[lane_index+1].getID()
      if lane_index == 0:
        lanelet_dict[lane_id]["right_lane_id"] = None
      else:
        lanelet_dict[lane_id]["right_lane_id"] = edge.getLanes()[lane_index-1].getID()
  # "left" and "right" connections for opposite direction lane are not added
  # now ignore the internal edges/lanes
  net = sumolib.net.readNet(sumo_net_xml_file, withInternal=False)
  edges = net.getEdges()
  for edge in edges:
    for lane in edge.getLanes():
      lane_id = lane.getID()
      # without internal lanes, getOutgoing() connects directly to normal lanes
      lanelet_dict[lane_id]["next_normal_lane_id_list"] = [conn.getToLane().getID() for conn in lane.getOutgoing()]
      for next_lane_id in lanelet_dict[lane_id]["next_lane_id_list"]:
        lanelet_dict[next_lane_id]["prev_normal_lane_id_list"] += [lane_id]
  return lanelet_dict
def get_edge_dict(sumo_net_xml_file):
  """Build a per-edge lookup from a SUMO .net.xml file.

  Each edge id maps to its lane ids, the from/to node ids, and its priority.
  Internal (intersection) edges, whose ids start with ':', are assigned
  infinite priority so they outrank every normal edge.
  """
  net = sumolib.net.readNet(sumo_net_xml_file, withInternal=True)
  edge_dict = {}
  for edge in net.getEdges():
    edge_id = edge.getID()
    edge_dict[edge_id] = {
      "lane_id_list": [lane.getID() for lane in edge.getLanes()],
      "from_node_id": edge.getFromNode().getID(),
      "to_node_id": edge.getToNode().getID(),
      # intersection has the highest priority
      "priority": float("inf") if edge_id[0] == ':' else edge.getPriority(),
    }
  return edge_dict
def get_obs_dict(env):
  """Assemble the observation dict described by get_observation_space.

  Reads the current vehicle states and the (static) lane/edge tables, fills
  the ego scalar features, then populates fixed-size per-vehicle arrays for
  the env.NUM_VEH_CONSIDERED vehicles nearest to the ego inside the square
  region of interest.  Slots beyond the available vehicles stay zero.
  """
  veh_dict = get_veh_dict(env)
  # NOTE(review): the net file is re-parsed on every call; the result only
  # depends on env.NET_XML_FILE, so caching it would be an easy speedup.
  lanelet_dict = get_lanelet_dict(env.NET_XML_FILE)
  edge_dict = get_edge_dict(env.NET_XML_FILE)
  obs_dict = {}
  ego_dict = veh_dict[env.EGO_VEH_ID]
  lane_id_list_ego_edge = edge_dict[ego_dict["edge_id"]]["lane_id_list"]
  if ego_dict["next_normal_edge_id"] != None:
    lane_id_list_ego_next_normal_edge = edge_dict[ego_dict["next_normal_edge_id"]]["lane_id_list"]
  else:
    lane_id_list_ego_next_normal_edge = []
  obs_dict["ego_speed"] = ego_dict["speed"]
  obs_dict["ego_dist_to_end_of_lane"] = min(ego_dict["lane_length"] - ego_dict["lane_position"], env.OBSERVATION_RADIUS)
  # lanes inside intersections have ids that start with ":"
  if ego_dict["lane_id"][0] == ":":
    obs_dict["ego_in_intersection"] = 1
  else:
    obs_dict["ego_in_intersection"] = 0
  # couldChangeLane has a time lag of one step, a workaround is needed until this is fixed
  #if env.tc.vehicle.couldChangeLane(env.EGO_VEH_ID, 1):
  if ego_dict["lane_index"] < len(lane_id_list_ego_edge)-1:
    obs_dict["ego_exists_left_lane"] = 1
  else:
    obs_dict["ego_exists_left_lane"] = 0
  if ego_dict["lane_index"] != 0:
    obs_dict["ego_exists_right_lane"] = 1
  else:
    obs_dict["ego_exists_right_lane"] = 0
  # correct lane
  # if next normal edge doesn't exist, consider ego to be already in correct lane
  obs_dict["ego_correct_lane_gap"] = 0
  # a lane y on the current edge is "correct" if an internal lane connects it
  # to some lane x of the next route edge; the gap is y's index minus ego's
  for x in lane_id_list_ego_next_normal_edge:
    for y in lane_id_list_ego_edge:
      if internal_lane_id_between_lanes(y, x, lanelet_dict)!= None:
        obs_dict["ego_correct_lane_gap"] = lanelet_dict[y]["lane_index"] - ego_dict["lane_index"]
  # clamp the gap to the number of lanes the networks consider
  if obs_dict["ego_correct_lane_gap"] > 0:
    obs_dict["ego_correct_lane_gap"] = min(obs_dict["ego_correct_lane_gap"], env.NUM_LANE_CONSIDERED)
  else:
    obs_dict["ego_correct_lane_gap"] = max(obs_dict["ego_correct_lane_gap"], -env.NUM_LANE_CONSIDERED)
  # vehicles inside region of insterest
  def in_ROI(ego_position, veh_position):
    # axis-aligned square of half-width OBSERVATION_RADIUS around the ego
    if ((veh_position[0] > ego_position[0]-env.OBSERVATION_RADIUS) and
        (veh_position[1] > ego_position[1]-env.OBSERVATION_RADIUS) and
        (veh_position[0] < ego_position[0]+env.OBSERVATION_RADIUS) and
        (veh_position[1] < ego_position[1]+env.OBSERVATION_RADIUS)
       ):
      return True
    return False
  veh_id_list_ROI = [k for k, v in veh_dict.items() if k!=env.EGO_VEH_ID and in_ROI(ego_dict["position"], v["position"])]
  # now deal with the relavant vehicles
  obs_dict["exists_vehicle"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["speed"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["dist_to_end_of_lane"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["in_intersection"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["relative_position"] = [[0, 0]] * env.NUM_VEH_CONSIDERED
  obs_dict["relative_heading"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["has_priority"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_peer"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_conflict"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_next"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_prev"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_left"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_right"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_ahead"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_behind"] = [0] * env.NUM_VEH_CONSIDERED
  obs_dict["veh_relation_irrelevant"] = [0] * env.NUM_VEH_CONSIDERED
  # sort veh within ROI by distance to ego
  veh_heap = []
  for veh_id in veh_id_list_ROI:
    state_dict = veh_dict[veh_id]
    heapq.heappush(veh_heap, (np.linalg.norm(np.array(state_dict["position"]) - np.array(ego_dict["position"])), veh_id))
  # fill slot 0..N-1 with the nearest vehicles, in increasing distance
  for veh_index in range(min(env.NUM_VEH_CONSIDERED, len(veh_heap))):
    _, veh_id = heapq.heappop(veh_heap)
    state_dict = veh_dict[veh_id]
    obs_dict["exists_vehicle"][veh_index] = 1
    obs_dict["speed"][veh_index] = state_dict["speed"]
    obs_dict["dist_to_end_of_lane"][veh_index] = min(state_dict["lane_length"] - state_dict["lane_position"], env.OBSERVATION_RADIUS)
    if state_dict["edge_id"][0] == ':':
      obs_dict["in_intersection"][veh_index] = 1
    # transform the position to ego coordinate
    # NOTE(review): SUMO angles are clockwise from north while this is a
    # standard CCW rotation matrix — confirm the intended frame convention.
    ego_angle_rad = ego_dict["angle"]/180 * pi
    rotation_mat = np.array([[np.cos(ego_angle_rad), -np.sin(ego_angle_rad)],
                             [np.sin(ego_angle_rad), np.cos(ego_angle_rad)]])
    relative_position = np.array(state_dict["position"]) - np.array(ego_dict["position"])
    relative_position = np.matmul(rotation_mat,relative_position)
    obs_dict["relative_position"][veh_index] = relative_position
    # wrap the heading difference into (-pi, pi]
    relative_heading = -(state_dict["angle"] - ego_dict["angle"])/180 * pi
    if relative_heading > pi:
      relative_heading -= 2*pi
    elif relative_heading < -pi:
      relative_heading += 2*pi
    obs_dict["relative_heading"][veh_index] = relative_heading
    # vehicle has priority over ego if the vehicle is
    # approaching/in the same intersection and it's inside a lane of higher priority
    # note that intersections (internal edges) are assigned the highest priority
    if edge_dict[state_dict["edge_id"]]["to_node_id"] == edge_dict[ego_dict["edge_id"]]["to_node_id"] and \
       edge_dict[state_dict["edge_id"]]["priority"] > edge_dict[ego_dict["edge_id"]]["priority"]:
      obs_dict["has_priority"][veh_index] = 1
    # check if the each of the possible relationship holds for the vehicle
    lane_id_list_veh_edge = edge_dict[state_dict["edge_id"]]["lane_id_list"]
    if state_dict["next_normal_edge_id"] != None:
      lane_id_list_veh_next_normal_edge = edge_dict[state_dict["next_normal_edge_id"]]["lane_id_list"]
    else:
      lane_id_list_veh_next_normal_edge = []
    # PEER if vehicle share the same next lane, since we only have edge (not lane) information within the route, we need to
    # search inside the next edge to see if there're any lanes whose previous lane belongs to the current edge of veh
    if state_dict["next_normal_edge_id"] == ego_dict["next_normal_edge_id"] and ego_dict["next_normal_edge_id"] != None:
      for x in lane_id_list_ego_next_normal_edge:
        for y in lane_id_list_veh_edge:
          if internal_lane_id_between_lanes(y, x, lanelet_dict) != None:
            obs_dict["veh_relation_peer"][veh_index] = 1 # PEER
    # CONFLICT if approaching/in the same intersection as the ego lane, and its route conflict that of the ego route
    if edge_dict[ego_dict["edge_id"]]["to_node_id"] == edge_dict[state_dict["edge_id"]]["to_node_id"]:
      for u in lane_id_list_veh_next_normal_edge:
        for v in lane_id_list_veh_edge:
          lane_id0 = internal_lane_id_between_lanes(v, u, lanelet_dict)
          for p in lane_id_list_ego_next_normal_edge:
            for q in lane_id_list_ego_edge:
              lane_id1 = internal_lane_id_between_lanes(q, p, lanelet_dict)
              if lane_id0 != None and lane_id1 != None:
                # conflict means the two internal-lane center lines cross
                if waypoint_intersect(lanelet_dict[lane_id0]["waypoint"], lanelet_dict[lane_id1]["waypoint"]) == True:
                  obs_dict["veh_relation_conflict"][veh_index] = 1 # CONFLICT
    # NEXT, PREV
    if state_dict["lane_id"] in lanelet_dict[ego_dict["lane_id"]]["next_lane_id_list"]:
      obs_dict["veh_relation_next"][veh_index] = 1 # NEXT
    if ego_dict["lane_id"] in lanelet_dict[state_dict["lane_id"]]["next_lane_id_list"]:
      obs_dict["veh_relation_prev"][veh_index] = 1 # PREV
    # LEFT, RIGHT
    if state_dict["lane_id"] == lanelet_dict[ego_dict["lane_id"]]["left_lane_id"]:
      obs_dict["veh_relation_left"][veh_index] = 1 # LEFT
    if state_dict["lane_id"] == lanelet_dict[ego_dict["lane_id"]]["right_lane_id"]:
      obs_dict["veh_relation_right"][veh_index] = 1 # RIGHT
    # AHEAD, BEHIND
    if state_dict["lane_id"] == ego_dict["lane_id"]:
      if state_dict["lane_position"] > ego_dict["lane_position"]:
        obs_dict["veh_relation_ahead"][veh_index] = 1 # AHEAD
      else:
        obs_dict["veh_relation_behind"][veh_index] = 1 # BEHIND
    pass
  return obs_dict
def intersect(p0, p1, q0, q1):
  """Return True if segments p0-p1 and q0-q1 properly cross.

  Uses the orientation test: the segments intersect iff each segment's
  endpoints lie on opposite sides of the other segment.  Collinear overlap
  is not reported as an intersection.
  """
  def ccw(a, b, c):
    """True when a -> b -> c turns counterclockwise."""
    return (c[1]-a[1])*(b[0]-a[0]) > (b[1]-a[1])*(c[0]-a[0])
  p_straddles_q = ccw(p0, q0, q1) != ccw(p1, q0, q1)
  q_straddles_p = ccw(p0, p1, q0) != ccw(p0, p1, q1)
  return p_straddles_q and q_straddles_p

def waypoint_intersect(waypoints0, waypoints1):
  """Return True if the polyline through waypoints0 crosses the one through waypoints1."""
  for a0, a1 in zip(waypoints0, waypoints0[1:]):
    for b0, b1 in zip(waypoints1, waypoints1[1:]):
      if intersect(a0, a1, b0, b1):
        return True
  return False
def internal_lane_id_between_lanes(from_lane_id, to_lane_id, lanelet_dict):
  """Return the internal (intersection) lane id that links the two lanes.

  Internal lane ids start with ':'.  If either endpoint is itself internal,
  the connecting lane is that endpoint, provided to_lane_id is a direct
  successor of from_lane_id.  Otherwise search from_lane_id's successors for
  a lane that is also a predecessor of to_lane_id.  Returns None when the
  lanes are not connected.
  """
  successors = lanelet_dict[from_lane_id]["next_lane_id_list"]
  if from_lane_id[0] == ':':
    return from_lane_id if to_lane_id in successors else None
  if to_lane_id[0] == ':':
    return to_lane_id if to_lane_id in successors else None
  predecessors = lanelet_dict[to_lane_id]["prev_lane_id_list"]
  for candidate in successors:
    if candidate in predecessors:
      return candidate
  return None
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,411 | pulinau/sumo_rl_driving | refs/heads/main | /mp.py | import multiprocessing as mp
# Multi-process training driver: the main process steps the SUMO environment
# and fans observations/rewards out over pipes to four Q-learning worker
# processes (safety / regulation / comfort / speed).
# NOTE(review): the next two module-level lines look like leftover scratch —
# `conn`, `Simulation` and `sumo_cfg` are undefined at this point and the
# first line is repeated inside the __main__ block; confirm and remove.
parent_conn_list, child_conn_list = zip(*[mp.Pipe() for _ in range(4)])
Simulation(conn, sumo_cfg)
if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  parser.add_argument("--play")
  args = parser.parse_args()
  env_list = [MultiObjSumoEnv(sumo_cfg) for i in range(6)]
  env = MultiObjSumoEnv(sumo_cfg)
  EPISODES = 60000
  if args.play:
    print("True")
    # NOTE(review): dqn_cfgs.py defines cfg_speed_comfort, not cfg_comfort /
    # cfg_speed — these names may not exist; confirm against dqn_cfgs.
    for dqn_cfg in [cfg_safety, cfg_regulation, cfg_comfort, cfg_speed]:
      dqn_cfg.play = True
    EPISODES = 10
  dqn_cfg_list = [cfg_safety, cfg_regulation, cfg_comfort, cfg_speed]
  # one bidirectional pipe per learner process
  parent_conn_list, child_conn_list = zip(*[mp.Pipe() for _ in range(4)])
  p_list = [mp.Process(target=Qlearning, args=(conn, sumo_cfg, dqn_cfg)) for conn, dqn_cfg in
            zip(child_conn_list, dqn_cfg_list)]
  [p.start() for p in p_list]
  for e in range(EPISODES):
    print("episode: {}/{}".format(e, EPISODES))
    obs_dict = env.reset()
    for step in range(6400):
      # env.agt_ctrl = False
      # randomly hand control between the agent and SUMO's own driver model:
      # 50/50 at episode start, then a 1% chance to flip on every step
      if args.play:
        env.agt_ctrl = True
      elif step == 0:
        if random.uniform(0, 1) < 0.5:
          env.agt_ctrl = True
        else:
          env.agt_ctrl = False
      else:
        if random.uniform(0, 1) < 0.01:
          if env.agt_ctrl == True:
            env.agt_ctrl = False
          else:
            env.agt_ctrl = True
      # True = "another step is coming" handshake with the workers
      [conn.send(True) for conn in parent_conn_list]
      # send obs_dict
      [conn.send(obs_dict) for conn in parent_conn_list]
      import time
      print("entering: ", time.time())
      # select action
      action_set_list, explr_set_list = zip(*[conn.recv() for conn in parent_conn_list])
      action, action_info = select_action(action_set_list, explr_set_list, dqn_cfg_list)
      if env.agt_ctrl == False:
        # NOTE(review): `==` is a no-op comparison whose result is discarded;
        # the assignment `action_info = "sumo"` was almost certainly intended.
        action_info == "sumo"
      print("exiting: ", time.time())
      # act
      # decode the flat reduced action index into (lane change, accel level)
      next_obs_dict, reward_list, env_state, action_dict = env.step(
        {"lane_change": ActionLaneChange(action // 7), "accel_level": ActionAccel(action % 7)})
      if env_state == EnvState.DONE:
        print("Ego successfully drived out of scene, step: ", step)
      # each worker gets the shared transition with its own objective's reward
      [conn.send((next_obs_dict, reward, env_state, action_dict)) for conn, reward in
       zip(parent_conn_list, reward_list)]
      # save model
      if step % 100 == 1:
        [conn.send(True) for conn in parent_conn_list]
      else:
        [conn.send(False) for conn in parent_conn_list]
      obs_dict = next_obs_dict
      if env_state != EnvState.NORMAL or step == 6400 - 1:
        print("Simulation Terminated, step: ", step, action_dict, action_info, reward_list, env_state, env.agt_ctrl)
        break
  # False = shutdown signal for the worker loops
  [conn.send(False) for conn in parent_conn_list]
[p.join() for p in p_list] | {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,412 | pulinau/sumo_rl_driving | refs/heads/main | /github-release/include.py | import sys
# Location of the SUMO python tools; adjust to the local SUMO installation.
SUMO_TOOLS_DIR = "/home/ken/project/sumo-bin/tools"
sys.path.append(SUMO_TOOLS_DIR)
try:
    # Fix: the try/except used to wrap sys.path.append, which never raises
    # ImportError — the handler was dead code and a bad SUMO_TOOLS_DIR failed
    # with no hint.  It is the traci/sumolib imports that can actually fail.
    import traci
    import sumolib
except ImportError:
    print("Please modify SUMO_TOOLS_DIR to the location of sumo tools")
    raise
import numpy as np
import gym
from gym import spaces
from enum import Enum, auto
# simulation
class EnvState(Enum):
  """Status of the simulation after a step."""
  DONE = auto()         # ego left the scene successfully (see the driver's DONE handling)
  CRASH = auto()
  NORMAL = auto()       # episode continues
  NOT_STARTED = auto()
  ERROR = auto()
# action
class ActionLaneChange(Enum):
  """Lane-change component of the discrete action space."""
  NOOP = 0   # stay in the current lane
  LEFT = 1
  RIGHT = 2
class ActionAccel(Enum):
  """Acceleration component of the discrete action space, ordered from
  strongest deceleration (0) to strongest acceleration (6); NOOP = 3 holds
  the current speed."""
  MAXDECEL = 0
  MEDDECEL = 1
  MINDECEL = 2
  NOOP = 3
  MINACCEL = 4
  MEDACCEL = 5
  MAXACCEL = 6
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,413 | pulinau/sumo_rl_driving | refs/heads/main | /dqn_cfgs.py | #!python3
__author__ = "Changjian Li"
import numpy as np
import tensorflow as tf
from include import *
from sumo_cfgs import *
from dqn import DQNCfg
def reshape_validity(obs_dict):
  """Pack the lane-existence and intersection flags into a (1, 3) int32 array
  for the rule-based validity head."""
  flags = [obs_dict["ego_exists_left_lane"],
           obs_dict["ego_exists_right_lane"],
           obs_dict["ego_in_intersection"]]
  return [np.array(flags, dtype=np.int32).reshape(1, -1)]
def select_actions_validity(state):
  """Rule-based action filter for the "validity" objective.

  state is the output of reshape_validity: [array([[left, right, in_int]])].
  Returns (valid, sorted_idx) where valid is the set of currently feasible
  reduced-space action indices and sorted_idx ranks all 9 actions by
  preference.  Reduced action space: indices 0-6 are the acceleration levels
  with no lane change, 7 is "change left", 8 is "change right".

  Fix: the original used four independent `if` blocks, so when the ego was
  inside an intersection while side lanes existed, a later branch silently
  overwrote the first branch's no-lane-change restriction.  The branches are
  now mutually exclusive, with the intersection case taking precedence.
  """
  ego_exists_left_lane = state[0][0][0]
  ego_exists_right_lane = state[0][0][1]
  ego_in_intersection = state[0][0][2]
  # lane-keeping acceleration actions, in preference order (mild accel first)
  accel_actions = [ActionLaneChange.NOOP.value * len(ActionAccel) + a.value
                   for a in (ActionAccel.MINACCEL, ActionAccel.MEDACCEL,
                             ActionAccel.MAXACCEL, ActionAccel.NOOP,
                             ActionAccel.MINDECEL, ActionAccel.MEDDECEL,
                             ActionAccel.MAXDECEL)]
  if ego_in_intersection == 1 or (ego_exists_left_lane == 0 and ego_exists_right_lane == 0):
    lane_actions, tail = [], [7, 8]    # no lane change possible
  elif ego_exists_left_lane == 0 and ego_exists_right_lane == 1:
    lane_actions, tail = [8], [8, 7]   # only a right change is valid
  elif ego_exists_left_lane == 1 and ego_exists_right_lane == 0:
    lane_actions, tail = [7], [7, 8]   # only a left change is valid
  else:
    lane_actions, tail = [7, 8], [7, 8]
  return (set(accel_actions + lane_actions), accel_actions + tail)
def reshape_safety(obs_dict):
  """reshape gym observation to keras neural network input

  Returns [ego_features] + one 17-feature column per considered vehicle,
  each wrapped in a singleton batch list, matching build_model_safety's
  inputs.  Features are roughly centred on 0 (shifted by -0.5).

  NOTE(review): this expects obs_dict keys (brake_signal, left_signal,
  right_signal, relative_speed, ttc) that github-release/observation.py's
  get_obs_dict does not produce — presumably a newer observation version;
  confirm which observation module feeds this.
  """
  # sqrt is used to strech the input to emphasize the near zero part
  o0 = np.array([np.sqrt(obs_dict["ego_speed"]/MAX_VEH_SPEED) - 0.5,
                 np.sqrt(min(obs_dict["ego_dist_to_end_of_lane"]/OBSERVATION_RADIUS, 1.0)) - 0.5,
                 obs_dict["ego_in_intersection"] - 0.5,
                 obs_dict["ego_exists_left_lane"] - 0.5,
                 obs_dict["ego_exists_right_lane"] - 0.5
                 ], dtype = np.float32)
  # o1 accumulates one row per feature, one column per considered vehicle
  o1 = np.reshape(np.array([], dtype = np.float32), (0, NUM_VEH_CONSIDERED))
  o1 = np.append(o1, np.array([obs_dict["exists_vehicle"]]) - 0.5, axis=0)
  o1 = np.append(o1, np.array([obs_dict["in_intersection"]]) - 0.5, axis=0)
  o1 = np.append(o1, np.array([obs_dict["brake_signal"]]) - 0.5, axis=0)
  o1 = np.append(o1, np.array([obs_dict["left_signal"]]) - 0.5, axis=0)
  o1 = np.append(o1, np.array([obs_dict["right_signal"]]) - 0.5, axis=0)
  rel_speed = np.array([obs_dict["relative_speed"]]) / MAX_VEH_SPEED + 0.5
  rel_speed = np.minimum(np.sqrt(np.abs(rel_speed)), np.ones((1, NUM_VEH_CONSIDERED))*0.5) * np.sign(rel_speed)
  o1 = np.append(o1, rel_speed , axis=0)
  o1 = np.append(o1, np.sqrt(np.minimum(np.array([obs_dict["dist_to_end_of_lane"]])/OBSERVATION_RADIUS,
                                        np.ones((1, NUM_VEH_CONSIDERED)))) - 0.5, axis = 0)
  # NOTE(review): `/ 2 * OBSERVATION_RADIUS` multiplies by OBSERVATION_RADIUS/2;
  # normalization into [-1, 1] would need `/ (2 * OBSERVATION_RADIUS)` — confirm.
  rel_pos = np.array(obs_dict["relative_position"]).T / 2 * OBSERVATION_RADIUS
  rel_pos = np.sqrt(np.abs(rel_pos)) * np.sign(rel_pos)
  o1 = np.append(o1, rel_pos, axis=0)
  # NOTE(review): same precedence concern — `/2*np.pi` scales by pi/2 rather
  # than dividing by 2*pi; confirm intended normalization.
  o1 = np.append(o1, np.array([obs_dict["relative_heading"]])/2*np.pi, axis=0)
  o1 = np.append(o1, np.array([obs_dict["veh_relation_peer"]]) - 0.5, axis=0)
  o1 = np.append(o1, np.array([obs_dict["veh_relation_conflict"]]) - 0.5, axis=0)
  o1 = np.append(o1, np.array([obs_dict["veh_relation_left"]]) - 0.5, axis=0)
  o1 = np.append(o1, np.array([obs_dict["veh_relation_right"]]) - 0.5, axis=0)
  o1 = np.append(o1, np.array([obs_dict["veh_relation_ahead"]]) - 0.5, axis=0)
  o1 = np.append(o1, np.array([obs_dict["veh_relation_behind"]]) - 0.5, axis=0)
  ttc = np.array([obs_dict["ttc"]]) / MAX_TTC_CONSIDERED
  ttc = np.sqrt(np.abs(ttc)) * np.sign(ttc)
  o1 = np.append(o1, ttc - 0.5, axis=0)
  # transpose so each list element is one vehicle's 17-feature vector
  o = [o0] + [x for x in o1.T]
  return [[x] for x in o]
# TF session config for the safety learner: cap GPU memory so the four
# learner processes can share one GPU.
tf_cfg_safety = tf.ConfigProto()
tf_cfg_safety.gpu_options.per_process_gpu_memory_fraction = 0.4
#tf_cfg_safety = tf.ConfigProto(device_count = {"GPU": 0})
def build_model_safety():
  """Q-network for the safety objective.

  One 5-feature ego input plus NUM_VEH_CONSIDERED 17-feature vehicle inputs.
  All vehicle branches share the same Dense layers (weight sharing), each is
  fused with the ego embedding by addition, and the final Q-values are the
  element-wise minimum over the per-vehicle heads — the most dangerous
  vehicle dominates.  Outputs are the per-vehicle heads plus the min head.
  """
  ego_input = tf.keras.layers.Input(shape=(5, ))
  ego_l1 = tf.keras.layers.Dense(64, activation=None)(ego_input)
  veh_inputs = [tf.keras.layers.Input(shape=(17,)) for _ in range(NUM_VEH_CONSIDERED)]
  # one shared layer instance => identical weights across vehicle branches
  shared_Dense1 = tf.keras.layers.Dense(64, activation=None)
  veh_l = [shared_Dense1(x) for x in veh_inputs]
  veh_l = [tf.keras.layers.add([ego_l1, x]) for x in veh_l]
  veh_l = [tf.keras.layers.Activation("sigmoid")(x) for x in veh_l]
  n_layers = 2
  Dense_list = [tf.keras.layers.Dense(64, activation=None) for _ in range(n_layers)]
  for i in range(n_layers):
    veh_l = [Dense_list[i](x) for x in veh_l]
    veh_l = [tf.keras.layers.Activation("sigmoid")(x) for x in veh_l]
  shared_Dense2 = tf.keras.layers.Dense(reduced_action_size, activation=None)
  veh_y = [shared_Dense2(x) for x in veh_l]
  # aggregate: worst-case (minimum) Q-value over all considered vehicles
  y = tf.keras.layers.minimum(veh_y)
  model = tf.keras.models.Model(inputs=[ego_input] + veh_inputs, outputs=veh_y + [y])
  opt = tf.keras.optimizers.RMSprop(lr=0.0001)
  model.compile(loss='logcosh', optimizer=opt)
  return model
def reshape_regulation(obs_dict):
  """Reshape the gym observation into the regulation network input.

  Builds a single feature vector: ego speed, distance to end of lane and
  time-to-end (sqrt-stretched to emphasise small values), the intersection
  and priority flags, plus a one-hot encoding of the signed correct-lane gap
  over 2*NUM_LANE_CONSIDERED + 1 slots.  All entries are shifted by -0.5.
  """
  lane_gap_one_hot = [-0.5] * (2 * NUM_LANE_CONSIDERED + 1)
  lane_gap_one_hot[obs_dict["ego_correct_lane_gap"] + NUM_LANE_CONSIDERED] = 0.5
  # time until the end of the current lane at the current speed, clamped
  time_to_end = min(obs_dict["ego_dist_to_end_of_lane"] / (obs_dict["ego_speed"] + 1e-6),
                    MAX_TTC_CONSIDERED) / MAX_TTC_CONSIDERED
  features = [np.sqrt(obs_dict["ego_speed"] / MAX_VEH_SPEED) - 0.5,
              np.sqrt(min(obs_dict["ego_dist_to_end_of_lane"] / OBSERVATION_RADIUS, 1.0)) - 0.5,
              np.sqrt(time_to_end) - 0.5,
              obs_dict["ego_in_intersection"] - 0.5,
              obs_dict["ego_has_priority"] - 0.5,
              ]
  return [[np.array(features + lane_gap_one_hot, dtype=np.float32)]]
# TF session config for the regulation learner: smaller GPU memory share.
tf_cfg_regulation = tf.ConfigProto()
tf_cfg_regulation.gpu_options.per_process_gpu_memory_fraction = 0.3
def build_model_regulation():
  """Q-network for the regulation objective: a 3-hidden-layer MLP over the
  reshape_regulation feature vector, emitting reduced-action-space Q-values.

  Fix: the hidden layers previously all branched off the raw input `x`
  (each loop iteration rebuilt `Dense(...)(x)` and discarded the previous
  activation), so only one hidden layer survived in the graph; the layers
  are now chained.
  """
  x = tf.keras.layers.Input(shape=(6 + 2*NUM_LANE_CONSIDERED, ))
  l = x
  for _ in range(3):
    l = tf.keras.layers.Dense(64, activation=None)(l)
    l = tf.keras.layers.Activation('sigmoid')(l)
  y = tf.keras.layers.Dense(reduced_action_size, activation='linear')(l)
  # the agent expects two output heads; both share the same tensor here
  model = tf.keras.models.Model(inputs=[x], outputs=[y, y])
  opt = tf.keras.optimizers.RMSprop(lr=0.0001)
  model.compile(loss='logcosh', optimizer=opt)
  return model
def reshape_speed_comfort(obs_dict):
  """Pack ego speed and the signed correct-lane gap into a (1, 2) float32
  array for the rule-based speed/comfort head."""
  features = np.array([obs_dict["ego_speed"], obs_dict["ego_correct_lane_gap"]],
                      dtype=np.float32)
  return [features.reshape(1, -1)]
def select_actions_speed_comfort(state):
  """Rule-based action selection for the speed/comfort objective.

  state is the output of reshape_speed_comfort: [array([[speed, lane_gap]])].
  Returns (valid, sorted_idx): the set of currently preferred reduced-space
  action indices and a full preference ranking of all 9 actions (0-6 are the
  acceleration levels with no lane change, 7 = change left, 8 = change right).

  Rules, identical to the original's 9 copy-pasted branches:
  * speed below MAX_VEH_SPEED - 1.4  -> accelerate (MIN/MED/MAXACCEL valid);
  * speed above MAX_VEH_SPEED + 1.4  -> brake gently (MINDECEL valid);
  * otherwise hold speed (NOOP/MINACCEL/MINDECEL valid).
  A positive lane gap additionally prefers a left change (7) ahead of
  everything, a negative gap a right change (8); the opposite lane change is
  always ranked last.
  """
  ego_speed = state[0][0][0]
  ego_correct_lane_gap = state[0][0][1]
  noop_lane = ActionLaneChange.NOOP.value * len(ActionAccel)

  def code(accel):
    # flat reduced-space index of a lane-keeping acceleration action
    return noop_lane + accel.value

  if ego_speed < MAX_VEH_SPEED - 1.4:
    # too slow: prefer accelerating, mildest first
    accel_pref = [ActionAccel.MINACCEL, ActionAccel.MEDACCEL, ActionAccel.MAXACCEL,
                  ActionAccel.NOOP, ActionAccel.MINDECEL, ActionAccel.MEDDECEL,
                  ActionAccel.MAXDECEL]
    accel_valid = accel_pref[:3]
  elif ego_speed > MAX_VEH_SPEED + 1.4:
    # too fast: only mild braking is valid
    accel_pref = [ActionAccel.MINDECEL, ActionAccel.NOOP, ActionAccel.MEDDECEL,
                  ActionAccel.MAXDECEL, ActionAccel.MINACCEL, ActionAccel.MEDACCEL,
                  ActionAccel.MAXACCEL]
    accel_valid = accel_pref[:1]
  else:
    # within the speed band: hold, with small corrections allowed
    accel_pref = [ActionAccel.NOOP, ActionAccel.MINACCEL, ActionAccel.MINDECEL,
                  ActionAccel.MEDACCEL, ActionAccel.MEDDECEL, ActionAccel.MAXACCEL,
                  ActionAccel.MAXDECEL]
    accel_valid = accel_pref[:3]
  pref_idx = [code(a) for a in accel_pref]
  valid_idx = [code(a) for a in accel_valid]
  if ego_correct_lane_gap == 0:
    valid = valid_idx
    sorted_idx = pref_idx + [7, 8]
  elif ego_correct_lane_gap > 0:
    valid = [7] + valid_idx
    sorted_idx = [7] + pref_idx + [8]
  else:
    valid = [8] + valid_idx
    sorted_idx = [8] + pref_idx + [7]
  return (set(valid), sorted_idx)
# Rule-based "validity" head: no learned model (version/model/memory fields
# are None, epsilon pinned to 0); actions come from select_actions_validity.
cfg_validity = DQNCfg(name = "validity",
                      play=False,
                      version=None,
                      resume = False,
                      state_size=2,
                      action_size=reduced_action_size,
                      low_target=None,
                      high_target=None,
                      gamma=None,
                      gamma_inc=None,
                      gamma_max=None,
                      epsilon=0,
                      epsilon_dec=0,
                      epsilon_min=0,
                      threshold=None,
                      memory_size=None,
                      traj_end_pred=None,
                      replay_batch_size=None,
                      traj_end_ratio= None,
                      _build_model=None,
                      model_rst_prob_list = [],
                      tf_cfg=None,
                      reshape=reshape_validity,
                      _select_actions=select_actions_validity)
class returnTrue():
    """Callable predicate that ignores its single argument and always
    returns True (used as a trivial trajectory-end predicate)."""

    def __call__(self, x):
        return True
# Safety expert: a trained DQN; state covers the ego vehicle plus
# NUM_VEH_CONSIDERED surrounding vehicles.
cfg_safety = DQNCfg(
    name="safety",
    play=False,
    version="current",
    resume=False,
    state_size=5 + 17 * NUM_VEH_CONSIDERED,
    action_size=reduced_action_size,
    low_target=-1,
    high_target=0,
    gamma=0.9,
    gamma_inc=1e-5,
    gamma_max=0.9,
    epsilon=0.6,
    epsilon_dec=1e-6,
    epsilon_min=0.4,
    threshold=-0.20,
    memory_size=3200,
    traj_end_pred=returnTrue(),
    replay_batch_size=320,
    traj_end_ratio=0.0001,
    _build_model=build_model_safety,
    model_rst_prob_list=[],
    tf_cfg=tf_cfg_safety,
    reshape=reshape_safety)
# Regulation expert: a trained DQN; state covers lane information for
# NUM_LANE_CONSIDERED lanes.
cfg_regulation = DQNCfg(
    name="regulation",
    play=False,
    version="current",
    resume=False,
    state_size=6 + 2 * NUM_LANE_CONSIDERED,
    action_size=reduced_action_size,
    low_target=-1,
    high_target=0,
    gamma=0.90,
    gamma_inc=1e-5,
    gamma_max=0.95,
    epsilon=0.6,
    epsilon_dec=1e-5,
    epsilon_min=0.6,
    threshold=-0.15,
    memory_size=64000,
    traj_end_pred=returnTrue(),
    replay_batch_size=320,
    traj_end_ratio=0.0001,
    _build_model=build_model_regulation,
    model_rst_prob_list=[],
    tf_cfg=tf_cfg_regulation,
    reshape=reshape_regulation)
# Speed/comfort expert: rule-based like the validity expert, so all training
# hyper-parameters are disabled (None / 0).
cfg_speed_comfort = DQNCfg(
    name="speed_comfort",
    play=False,
    version=None,
    resume=False,
    state_size=2,
    action_size=reduced_action_size,
    low_target=None,
    high_target=None,
    gamma=None,
    gamma_inc=None,
    gamma_max=None,
    epsilon=0,
    epsilon_dec=0,
    epsilon_min=0,
    threshold=None,
    memory_size=None,
    traj_end_pred=None,
    replay_batch_size=None,
    traj_end_ratio=None,
    _build_model=None,
    model_rst_prob_list=[],
    tf_cfg=None,
    reshape=reshape_speed_comfort,
    _select_actions=select_actions_speed_comfort)
| {"/main.py": ["/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py", "/dqn_cfgs.py", "/workers.py"], "/sumo_cfgs.py": ["/sumo_gym.py"], "/github-release/sumo_gym.py": ["/observation.py", "/reward.py"], "/examples.py": ["/sumo_cfgs.py", "/sumo_gym.py"], "/github-release/main.py": ["/observation.py", "/sumo_gym.py", "/dqn.py"], "/test.py": ["/sumo_cfgs.py", "/sumo_gym.py", "/observation.py"], "/workers.py": ["/observation.py", "/sumo_gym.py", "/dqn.py", "/sumo_cfgs.py"], "/dqn.py": ["/sumo_cfgs.py", "/replay_mem.py"], "/load_pretrain.py": ["/replay_mem.py", "/dqn.py", "/dqn_cfgs.py", "/sumo_cfgs.py"], "/sumo_gym.py": ["/observation.py", "/reward.py"], "/dqn_cfgs.py": ["/sumo_cfgs.py", "/dqn.py"]} |
54,414 | PatJByrne/msd_project | refs/heads/master | /distance_from_lat_long.py |
# coding: utf-8
# In[32]:
import numpy as np
Earth_Radius = 6.371e6  # mean Earth radius in meters (spherical Earth model)
degrees = np.pi/180.  # multiply a value in degrees by this to get radians
pixel_to_meter = 500./(497-300)  # map scale: 500 m spanned by pixel range 300..497 -- TODO confirm against map legend
def distance(pt1, pt2):
    """Meridian arc distance between two checkpoints on a spherical Earth.

    The trick is to treat the first point as the 'North Pole': measured from
    there, only the latitude difference matters and longitude is irrelevant.

    pt1, pt2 -- (latitude_deg, longitude_deg, elevation_m) triples.
    Returns (dist, correction): the signed arc length in meters and a
    first-order elevation-slope correction term.
    """
    lat = np.array([pt1[0], pt2[0]]) * degrees
    elev = np.array([pt1[2], pt2[2]])
    d_lat = lat[1] - lat[0]
    d_el = elev[1] - elev[0]
    slope = d_el / d_lat              # elevation change per radian of latitude
    dist = Earth_Radius * d_lat       # flat-sphere arc length (signed)
    correction = 0.5 * slope * d_lat ** 2
    return (dist, correction)
def point_data_lat_long(pt, filepath):
    """Return [latitude, longitude, elevation] for checkpoint *pt*.

    Reads Geo_spatial_data_checkpoints.csv under *filepath*.  Checkpoints are
    numbered from 101, one per data row after the header.
    """
    # Fix: manage the file handle with a context manager instead of leaking
    # an open handle on a read error.
    with open(filepath + '/Geo_spatial_data_checkpoints.csv', 'r') as f:
        lines = f.readlines()[1:]  # drop the header row
    fields = lines[pt - 101].strip().split(',')
    # Columns 1-5 are Lat, Long, X_pixel, Y_pixel, elevation;
    # keep Lat, Long and elevation (indices 0, 1, 4 of the slice).
    return np.asarray(fields[1:6], dtype=float)[[0, 1, 4]]
def point_data_pixel(pt, filepath):
    """Return a 3-element array for checkpoint *pt* built from pixel columns.

    Reads Geo_spatial_data_checkpoints.csv under *filepath* and returns
    [column4, column5, column3] as floats -- the order the original code
    produced (presumably [Y_pixel, elevation, X_pixel]; confirm against the
    CSV header before relying on the ordering).
    """
    # Fix: use a context manager instead of leaving the handle open on error.
    with open(filepath + '/Geo_spatial_data_checkpoints.csv', 'r') as f:
        lines = f.readlines()[1:]  # drop the header row
    fields = lines[pt - 101].strip().split(',')
    return np.append(np.asarray(fields[4:6], dtype=float), float(fields[3]))
def point_distance(pt1, pt2, filepath='.'):
    """Distance in meters between checkpoints *pt1* and *pt2* (ints).

    Pulls latitude/longitude/elevation from the checkpoint CSV under
    *filepath* (default: current working directory), feeds them to
    distance(), and returns |arc length| plus the elevation correction.
    """
    data1 = point_data_lat_long(pt1, filepath)
    data2 = point_data_lat_long(pt2, filepath)
    dist, correction = distance(data1, data2)
    return abs(dist) + correction
def point_distance_pixel(pt1, pt2, filepath='.'):
    """Euclidean 3-D distance in meters between checkpoints *pt1* and *pt2*.

    Reads the pre-converted Geo_spatial_data_checkpoints_meters.csv under
    *filepath* (columns: Point, X, Y, Elevation; the first data row after the
    header is point 100, so rows are indexed by pt - 100).
    """
    # Fixes: context-managed file handle, and the local result is no longer
    # named `distance`, which shadowed the module-level distance() function.
    with open(filepath + '/Geo_spatial_data_checkpoints_meters.csv', 'r') as f:
        lines = f.readlines()[1:]  # drop the header row
    p1 = np.asarray(lines[pt1 - 100].strip().split(','), dtype=float)
    p2 = np.asarray(lines[pt2 - 100].strip().split(','), dtype=float)
    # Columns 1..3 are X, Y, Elevation.
    return np.sqrt(((p1[1:4] - p2[1:4]) ** 2).sum())
def polar_to_cartesian(filepath='.'):
    """Convert checkpoint Lat/Long to X/Y coordinates in meters.

    Y = R_earth * delta_Lat;  X = R_earth * delta_Long * cos(Lat).
    (0, 0) is the start/finish checkpoint (the last row of the input CSV).
    Writes Geo_spatial_data_checkpoints_meters.csv under *filepath* and
    returns (Pt, X, Y) as numpy arrays.
    """
    with open(filepath + '/Geo_spatial_data_checkpoints.csv', 'r') as f:
        lines = f.readlines()[1:]  # drop the header row
    sf = lines[-1].strip().split(',')
    sf_lat = float(sf[1])
    sf_long = float(sf[2])
    # Start/finish is point 100 at the origin.
    pts, xs, ys = ['100'], ['0'], ['0']
    for line in lines[:-1]:
        cols = line.strip().split(',')
        lat = float(cols[1])
        d_lat = lat - sf_lat
        d_long = float(cols[2]) - sf_long
        pts.append(cols[0])
        ys.append(str(Earth_Radius * d_lat * degrees))
        xs.append(str(Earth_Radius * d_long * degrees * np.cos(lat * degrees)))
    with open(filepath + '/Geo_spatial_data_checkpoints_meters.csv', 'w') as f:
        # Bug fix: previously no '\n' was ever written, so the whole output
        # CSV landed on a single unparseable line (sibling pixel_to_cartesian
        # writes one record per line).
        f.write(','.join(['Point', 'X', 'Y']) + '\n')
        for i in range(len(pts)):
            f.write(','.join([pts[i], xs[i], ys[i]]) + '\n')
    return (np.asarray(pts, dtype=int),
            np.asarray(xs, dtype=float),
            np.asarray(ys, dtype=float))
def pixel_to_cartesian(filepath='.'):
    """Convert checkpoint pixel X/Y values to meters relative to the start.

    (0, 0) is the start/finish checkpoint (the last row of the input CSV).
    Writes Geo_spatial_data_checkpoints_meters.csv (Point, X, Y, Elevation)
    under *filepath* and returns (Pt, X, Y, Z) as numpy arrays.
    """
    with open(filepath + '/Geo_spatial_data_checkpoints.csv', 'r') as f:
        lines = f.readlines()[1:]  # drop the header row
    start = lines[-1].strip().split(',')
    start_x = float(start[3])
    start_y = float(start[4])
    # Start/finish is point 100 at the origin; its elevation is 356.
    pts, xs, ys, elevs = ['100'], ['0'], ['0'], ['356']
    for line in lines[:-1]:
        cols = line.strip().split(',')
        pts.append(cols[0])
        xs.append(str((float(cols[3]) - start_x) * pixel_to_meter))
        ys.append(str((float(cols[4]) - start_y) * pixel_to_meter))
        elevs.append(cols[5])
    with open(filepath + '/Geo_spatial_data_checkpoints_meters.csv', 'w') as f:
        f.write(','.join(['Point', 'X', 'Y', 'Elevation']))
        f.write('\n')
        for record in zip(pts, xs, ys, elevs):
            f.write(','.join(record))
            f.write('\n')
    return (np.asarray(pts, dtype=int),
            np.asarray(xs, dtype=float),
            np.asarray(ys, dtype=float),
            np.asarray(elevs, dtype=float))
| {"/Graph_dictionary.py": ["/distance_from_lat_long.py"], "/Map_Graph_Plot.py": ["/Graph_dictionary.py", "/distance_from_lat_long.py"]} |
54,415 | PatJByrne/msd_project | refs/heads/master | /Clean and Model Data.py |
# coding: utf-8
# This notebook takes csv's that contain data that was scraped from the orienteering website and cleans it to prepare it for a model.
# In[1]:
# In[2]:
from __future__ import print_function
import csv as csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#mpl.rcParams.update({'axes.labelsize': 20})
import copy
import re
# ### Import and begin preparing in pandas
# We start by importing the csv with the scraped data from the orienteering website and putting it in multiple pandas dataframes.
# In[3]:
# Load the per-checkpoint split times scraped from the orienteering site.
route_df = pd.read_csv('Race_data_team_splits_checkpoints.csv', header=0)
# In[4]:
# The 'Name' field is "<3-digit team ID> <team name>"; split it apart.
route_df['team ID'] = route_df['Name'].map(lambda x: int(x[:3]))
route_df['Team Name'] = route_df['Name'].map(lambda x: x[4:].strip())
# Checkpoint labels arrive wrapped in parentheses, e.g. "(101)" -> "101".
route_df['Checkpoint'] = route_df['Checkpoint'].str.replace(r"\(","")
route_df['Checkpoint'] = route_df['Checkpoint'].str.replace(r"\)","")
route_df.columns = ['Name', 'Checkpoint', 'Total Time', 'Split', 'team ID', 'Team Name']
route_df = route_df.drop('Name', 1)
# In[5]:
# Move the two identifier columns ('team ID', 'Team Name') to the front.
cols = route_df.columns.tolist()
cols = cols[-2:] + cols[:-2]
route_df = route_df[cols]
# In[6]:
route_df.to_csv('clean_data/team_routes.csv')
# In[7]:
#route_df.head()
# In[8]:
# Team registration info and final standings come from two more scraped files.
team_df = pd.read_csv('registration_list.csv', header=0)
score_df = pd.read_csv('Team_place_time_score.csv', header=None)
# In[9]:
# Spaces in team names break pandas matching, so keep a space-stripped copy
# in a helper column named 'test' for the merge key.
team_df['test'] = team_df['Team Name'].map(lambda x: ''.join(x.split(' ')).strip())
# In[10]:
score_df.columns = ['Rank', 'Name', 'Class', 'Club', 'Time', 'Score', 'Gross Score', 'Penalty', 'time_limit']
# In[11]:
# Split the team into team ID and Team Name
score_df['team ID'] = score_df['Name'].map(lambda x: int(x[:3]))
score_df['Team Name'] = score_df['Name'].map(lambda x: x[4:].strip())
# Strip the Team Name to make a column that is searchable without spaces
score_df['test'] = score_df['Team Name'].map(lambda x: ''.join(x.split(' ')).strip())
# In[12]:
#score_df.describe()
# ### Merge datasets to build team information
# Right-join: keep every scored team, attaching registration info by name.
test_df = pd.merge(team_df, score_df, on='test', how='right')
# In[14]:
# Not all of the merges worked, so need to manually enter some data.
test_df.loc[test_df['team ID'] == 459, 'Borrow'] = 'Y'
test_df.loc[test_df['team ID'] == 459, 'Size'] = 4
test_df.loc[test_df['team ID'] == 286, 'Borrow'] = 'Y'
test_df.loc[test_df['team ID'] == 286, 'Size'] = 5
test_df.loc[test_df['team ID'] == 276, 'Borrow'] = 'Y'
test_df.loc[test_df['team ID'] == 276, 'Size'] = 2
# In[15]:
# Drop score rows whose registration info could not be matched at all.
test_df = test_df[test_df['Size'].notnull()]
# In[16]:
# The merge duplicated some columns (_x from team_df, _y from score_df);
# keep the score-side versions under clean names.
test_df = test_df.drop('Team Name_x', 1)
test_df = test_df.drop('time_limit_x', 1)
test_df = test_df.drop('Name', 1)
test_df = test_df.rename(columns={'Team Name_y': 'Team Name', 'time_limit_y': 'Time Limit'})
# In[17]:
#test_df.head()
# In[18]:
# The following reorganizes the order of the columns
cols = test_df.columns.tolist()
cols = cols[-2:] + cols[:-2]
temp = cols[-4]
del cols[-4]
cols.append(temp)
temp = cols[-2]
del cols[-2]
cols.insert(2, temp)
test_df = test_df[cols]
# In[19]:
#test_df.head()
# In[20]:
# Save cleaned data
test_df.to_csv('clean_data/team_info_score.csv')
# ### Prepare data for model
# In order to prepare the data for modeling we did the following:
# * Remove all columns except: size, borrow, class, club, penalty, and score
# * Change the borrow column from Y/N to 0/1
# * Change the class column to a binary vector, so each team will have a '1' in the column with their class and '0' in the others
# * Change club to 0/1 whether the team is a club member or not
# * Change penalty to binary on whether they were penalized for being late or not
# * Split the data into teams from the 6-hour race and 3-hour race
# In[21]:
# Keep only the model features: Size, Borrow, Class, Club, Penalty, Score,
# Time Limit (plus team ID for later joins).
model_df = test_df.drop(['Team Name', 'Category', 'test', 'Rank', 'Time', 'Gross Score'], 1)
# In[22]:
# Encode the categorical columns as 0/1 flags.
model_df['Club_b'] = model_df['Club'].fillna(0).map( { 0: 0, 'WPOC': 1 })  # WPOC club member?
model_df['Penalty_b'] = model_df['Penalty'].map(lambda x: 0 if x == 0 else 1)  # any lateness penalty?
model_df['Borrow_b'] = model_df['Borrow'].map( { 'N': 0, 'Y': 1 } )  # borrowed tracking gear?
# In[23]:
#model_df.head()
# In[24]:
# Model the 3-hour and 6-hour races separately.
model_hour3_df = model_df[ model_df['Time Limit'] == 3 ]
model_hour6_df = model_df[ model_df['Time Limit'] == 6 ]
# In[25]:
# One-hot encode the entry class.
model_hour3_df = pd.concat([model_hour3_df, pd.get_dummies(model_hour3_df['Class'])], axis=1)
model_hour6_df = pd.concat([model_hour6_df, pd.get_dummies(model_hour6_df['Class'])], axis=1)
# In[26]:
# Drop the raw columns now that encoded versions exist.
model_hour3_df = model_hour3_df.drop(['team ID', 'Time Limit', 'Borrow', 'Class', 'Club', 'Penalty'], 1)
model_hour6_df = model_hour6_df.drop(['team ID', 'Time Limit', 'Borrow', 'Class', 'Club', 'Penalty'], 1)
# In[27]:
#model_hour6_df.head()
# ### Adding checkpoints visited to model
# As a separate analysis, we added the checkpoints each team hit into a separate model. This was done to see if certain checkpoints were correlated with higher scores. To do this, we added a binary vector of length 50, one for each of the 50 checkpoints, so that if a team reached that checkpoint, the value for that column would be 1, otherwise it would be 0.
# In[28]:
#route_df.head()
# In[29]:
# One list of visited checkpoints per team (NaN split rows dropped first).
checkpoints_by_team = route_df.dropna().groupby('team ID')['Checkpoint'].apply(lambda x: x.tolist())
#len(checkpoints_by_team.index)
# In[30]:
# One row per team: column 0 holds the team ID, columns 1-50 flag whether the
# team reached checkpoints 101-150.
# NOTE(review): 70 rows assumes at most 70 teams -- confirm against the data.
checkpoint_array = np.zeros((70,51))
# In[31]:
for i,teamID in enumerate(checkpoints_by_team.keys()):
    checkpoint_array[i,0] = np.int(teamID)
    for checkpoint in checkpoints_by_team[teamID]:
        # 'F' marks the finish, not a scoring checkpoint.
        if checkpoint =='F' or checkpoint == 'nan':
            continue
        # NOTE(review): np.int is removed in NumPy >= 1.24; plain int() works.
        idx = np.int(checkpoint) - 100
        checkpoint_array[i, idx] = 1
    #print(teamID, checkpoints_by_team[teamID])
# In[32]:
#checkpoint_array[:,1:]
# In[33]:
# Column names: 'team ID' followed by '101'..'150'.
columns = ['team ID',]
columns.extend([ '{}'.format(x) for x in np.arange(101,151)])
checkpoint_df = pd.DataFrame.from_records(checkpoint_array, columns=columns)
# In[34]:
#checkpoint_df.head()
# In[35]:
# Attach the checkpoint flags to the team features.
model_checkpoints_df = pd.merge(model_df, checkpoint_df, on='team ID', how='right')
# In[36]:
#model_checkpoints_df.head()
# In[37]:
model_checkpoints_hour3_df = model_checkpoints_df[ model_checkpoints_df['Time Limit'] == 3 ]
model_checkpoints_hour6_df = model_checkpoints_df[ model_checkpoints_df['Time Limit'] == 6 ]
# In[38]:
# One-hot encode the entry class, as in the no-checkpoint models.
model_checkpoints_hour3_df = pd.concat([model_checkpoints_hour3_df, pd.get_dummies(model_checkpoints_hour3_df['Class'])], axis=1)
model_checkpoints_hour6_df = pd.concat([model_checkpoints_hour6_df, pd.get_dummies(model_checkpoints_hour6_df['Class'])], axis=1)
# In[39]:
model_checkpoints_hour3_df = model_checkpoints_hour3_df.drop(['team ID', 'Time Limit', 'Borrow', 'Class', 'Club', 'Penalty'], 1)
model_checkpoints_hour6_df = model_checkpoints_hour6_df.drop(['team ID', 'Time Limit', 'Borrow', 'Class', 'Club', 'Penalty'], 1)
#model_checkpoints_hour6_df.head()
# ### Setup and train model
# We created a model using LassoCV in scikit-learn because we have many parameters and not very much data, so we only want to use the important parameters. We start by looking at the data without the information about which checkpoints the teams reached. The value we are trying to predict is the score.
#
# Due to the limited size of the dataset, we setup multiple iterations of randomly selecting a training set and test set from the data, fitting a model to training set, and seeing the root mean square error for both the training set and test set. We compared the model to a model where we just predict the average score of all the teams. By doing multiple iterations, we averaged each RMSE for each iteration, as well as the coefficients for each of the parameters.
# In[41]:
from sklearn import linear_model
def fit_model(df):
    """Fit a LassoCV model predicting 'Score' from all other columns of *df*.

    Randomly splits *df* ~80/20 into train/holdout rows, fits sklearn's
    LassoCV (5-fold CV) on the training rows, and evaluates root-mean-square
    error on both splits plus a baseline that always predicts the mean
    training score.

    Returns (coefficients, train_rmse, holdout_rmse, baseline_rmse).
    Note: callers historically bind these to *_mse names, but all three
    error values are RMSEs (np.sqrt of the mean squared error).
    """
    mask = np.random.rand(len(df)) < 0.8  # ~80% of rows -> training set
    train_df = df[mask]
    # Renamed from test_df: the old local shadowed the module-level test_df.
    holdout_df = df[~mask]
    train_y = train_df['Score'].values
    holdout_y = holdout_df['Score'].values
    train_X = train_df.drop(['Score'], axis=1).values
    holdout_X = holdout_df.drop(['Score'], axis=1).values
    model = linear_model.LassoCV(cv=5)
    model.fit(train_X, train_y)
    pred_train = model.predict(train_X)
    pred_holdout = model.predict(holdout_X)
    # Baseline: predict the mean training score for every team.
    pred_baseline = np.average(train_y) * np.ones(pred_train.shape)
    train_rmse = np.sqrt(((pred_train - train_y) ** 2).mean())
    holdout_rmse = np.sqrt(((pred_holdout - holdout_y) ** 2).mean())
    baseline_rmse = np.sqrt(((pred_baseline - train_y) ** 2).mean())
    return (model.coef_, train_rmse, holdout_rmse, baseline_rmse)
# ### Model for 6 hour race, no checkpoints
# We find a rather high RMSE for both the training and test sets for the 6-hour race data. The RMSE is very close to or worse than the RMSE for just predicting the average score, meaning this is not a good model for predicting score. There are probably too few parameters and too few data for this model to be good.
# In[42]:
# 6-hour race, no checkpoint features: average results over 50 random splits
# to smooth out the tiny sample.
coef_list_6 = []
train_mse_list_6 = []
test_mse_list_6 = []
predict_avg_mse_list_6 = []
for i in np.arange(50):
    coefs, train_mse, test_mse, predict_avg_mse = fit_model(model_hour6_df)
    coef_list_6.append(coefs)
    train_mse_list_6.append(train_mse)
    test_mse_list_6.append(test_mse)
    predict_avg_mse_list_6.append(predict_avg_mse)
print('The root mean squared error of the 6-hour no checkpoints training set is {0:0.2f}.'.format(np.average(train_mse_list_6)))
print('The root mean squared error of the 6-hour no checkpoints test set is {0:0.2f}.'.format(np.average(test_mse_list_6)))
print('The mean squared error of predicting the average score of the 6-hour no checkpoints is {0:0.2f}.'.format(np.average(predict_avg_mse_list_6)))
# ### Model for 3 hour race, no checkpoints
# The RMSE is also bad for the 3-hour race, for which there is even less data.
# In[43]:
# 3-hour race, no checkpoint features: same 50-split averaging procedure.
coef_list_3 = []
train_mse_list_3 = []
test_mse_list_3 = []
predict_avg_mse_list_3 = []
for i in np.arange(50):
    coefs, train_mse, test_mse, predict_avg_mse = fit_model(model_hour3_df)
    coef_list_3.append(coefs)
    train_mse_list_3.append(train_mse)
    test_mse_list_3.append(test_mse)
    predict_avg_mse_list_3.append(predict_avg_mse)
print('The root mean squared error of the 3-hour no checkpoints training set is {0:0.2f}.'.format(np.average(train_mse_list_3)))
print('The root mean squared error of the 3-hour no checkpoints test set is {0:0.2f}.'.format(np.average(test_mse_list_3)))
print('The mean squared error of predicting the average score of the 3-hour no checkpoints is {0:0.2f}.'.format(np.average(predict_avg_mse_list_3)))
# ### Look at model parameters for no checkpoint cases
# Below we show the coefficients for the 6-hour and 3-hour model separately. Each coefficient is an average of the 50 iterations of the model fits for randomly selected training sets.
#
# We see for the 6-hour mode, not many parameters have sizable coefficients, so the model ends up being very simplistic and the parameters we have don't seem to be important in predicting a team's score. Most notable is that a larger size of the team correlates with lower scores. Also, teams that needed to borrow the electronic piece of equipment for tracking their checkpoints tended to get lower scores. The 3-hour model showed some interesting coefficients, showing that the teams that were penalized for returning to the finish late got lower scores and the mixed mens class tended to do better.
# In[44]:
# Print each feature's coefficient, averaged over the 50 random-split fits.
# NOTE(review): assumes the 3-hour and 6-hour frames share column order --
# confirm both get_dummies() calls produced the same class columns.
cols = list(model_hour6_df.drop(['Score'], axis=1).columns.values)
#print(len(cols), len(model_6chk.coef_))
print('6-hour\t3-hour\tcoeff')
for i,col in enumerate(cols):
    coeffs6 = []
    coeffs3 = []
    for j in np.arange(len(coef_list_6)):
        coeffs6.append(coef_list_6[j][i])
        coeffs3.append(coef_list_3[j][i])
    print('{:>6.1f}\t{:>6.1f}\t{}'.format(np.average(coeffs6), np.average(coeffs3), col))
# ## With checkpoints
# ### 6-hour model with checkpoints
# By including the checkpoints as parameters in the model, the prediction accuracy increases, as expected. The RMSE of the training set is 53 and the test set is 125, which is reasonable for total scores around 500-1000 and with limited training data. Both of these are less than the naive prediction of average score for each team.
# In[45]:
# 6-hour race WITH checkpoint features: same 50-split averaging procedure.
coef_list_6 = []
train_mse_list_6 = []
test_mse_list_6 = []
predict_avg_mse_list_6 = []
for i in np.arange(50):
    coefs, train_mse, test_mse, predict_avg_mse = fit_model(model_checkpoints_hour6_df)
    coef_list_6.append(coefs)
    train_mse_list_6.append(train_mse)
    test_mse_list_6.append(test_mse)
    predict_avg_mse_list_6.append(predict_avg_mse)
print('The root mean squared error of the 6-hour with checkpoints training set is {0:0.2f}.'.format(np.average(train_mse_list_6)))
print('The root mean squared error of the 6-hour with checkpoints test set is {0:0.2f}.'.format(np.average(test_mse_list_6)))
print('The mean squared error of predicting the average score of the 6-hour with checkpoints is {0:0.2f}.'.format(np.average(predict_avg_mse_list_6)))
# ### 3-hour model with checkpoints
# The 3-hour model did not do very well again, probably because of the limited number of teams and the small amount of training data.
# In[46]:
# 3-hour race WITH checkpoint features: same 50-split averaging procedure.
coef_list_3 = []
train_mse_list_3 = []
test_mse_list_3 = []
predict_avg_mse_list_3 = []
for i in np.arange(50):
    coefs, train_mse, test_mse, predict_avg_mse = fit_model(model_checkpoints_hour3_df)
    coef_list_3.append(coefs)
    train_mse_list_3.append(train_mse)
    test_mse_list_3.append(test_mse)
    predict_avg_mse_list_3.append(predict_avg_mse)
print('The root mean squared error of the 3-hour with checkpoints training set is {0:0.2f}.'.format(np.average(train_mse_list_3)))
print('The root mean squared error of the 3-hour with checkpoints test set is {0:0.2f}.'.format(np.average(test_mse_list_3)))
# Bug fix: this is the 3-hour section (the message previously said "6-hour"),
# and the value printed is an RMSE (np.sqrt of the MSE), not an MSE.
print('The root mean squared error of predicting the average score of the 3-hour with checkpoints is {0:0.2f}.'.format(np.average(predict_avg_mse_list_3)))
# ### Looking at parameters for with checkpoint cases
# By including the checkpoints in the model, we can see which checkpoints are correlated with higher scores. The most notable are 124, 125, and 149 because they all have high coefficients for the 6-hour race. Based on their location on the map, they do not seem to be easy checkpoints to get to based on distance, but perhaps the better and faster teams could plan a route to reach them, while picking up other checkpoints along the way. It is also interesting to note that none of the coefficients have large negative values for the checkpoints, so there are no checkpoints associated with lower scores.
# In[47]:
# Print per-feature (including per-checkpoint) coefficients, averaged over
# the 50 random-split fits for each race.
# NOTE(review): assumes the 3-hour and 6-hour frames share column order.
cols = list(model_checkpoints_hour6_df.drop(['Score'], axis=1).columns.values)
#print(len(cols), len(model_6chk.coef_))
print('6-hour\t3-hour\tcoeff')
for i,col in enumerate(cols):
    coeffs6 = []
    coeffs3 = []
    for j in np.arange(len(coef_list_6)):
        coeffs6.append(coef_list_6[j][i])
        coeffs3.append(coef_list_3[j][i])
    print('{:>6.1f}\t{:>6.1f}\t{}'.format(np.average(coeffs6), np.average(coeffs3), col))
# In[ ]:
| {"/Graph_dictionary.py": ["/distance_from_lat_long.py"], "/Map_Graph_Plot.py": ["/Graph_dictionary.py", "/distance_from_lat_long.py"]} |
54,416 | PatJByrne/msd_project | refs/heads/master | /Data_grouping.py | '''
Program - Groups data based on teams and checkpoints
Input - Race_data_team_splits_checkpoints.csv
Output - Team_wise_points.txt, Point_wise_time.txt
Author - Shivangi Saxena
'''
import csv
from collections import defaultdict
# Read the scraped split data once and write two grouped views of it:
# per-team leg lists (Team_wise_points.txt) and per-leg team times
# (Point_wise_time.txt).
with open('Race_data_team_splits_checkpoints.csv', newline = '') as ip, open('Team_wise_points.txt','w') as op_T, open('Point_wise_time.txt','w') as op_P:
    read_ip = csv.reader(ip, delimiter = ',', quotechar='"')
    firstline = True
    temp = {} ##keeps a track of the teams covered, temp dictionary
    splits = [] ##main list, keeps record of team, source checkpoint, destination checkpoint, time taken between these points
    for row in read_ip:
        if firstline:
            firstline = False
            continue ##skips the header of the CSV file
        team_name = row[0]
        destination = row[1]
        time = row[3]
        # The first row seen for a team starts its route at the start/finish " (F)".
        if team_name not in temp: ##if new team, keep source as start-point
            source = " (F)"
            temp[team_name] = True
        splits.append([team_name,source,destination,time])
        source = destination ##carry-over source to next iteration
    ##make dictionary that groups together info from "splits" based on TEAMS
    teams = defaultdict(list)
    for row in splits:
        teams[row[0]].append([row[1],row[2],row[3]])
    for t in teams:
        op_T.write("\n" + str(t) + ":\n")
        for point in teams[t]:
            op_T.write(str(point[0]) + " " + str(point[1]) + " " + str(point[2]) + "\n")
    ##group-by start and end-points
    points = defaultdict(list)
    for row in splits:
        # NOTE(review): assumes the checkpoint digits occupy characters 1-3
        # of the label; non-numeric labels sort as 0 -- confirm CSV format.
        if ((row[1][1:4]).isdigit()):
            k1 = int(row[1][1:4])
        else:
            k1 = 0
        if ((row[2][1:4]).isdigit()):
            k2 = int(row[2][1:4])
        else:
            k2 = 0
        # Order each pair so the A->B and B->A legs share a single key.
        if (k1<k2):
            key = (row[1],row[2])
        else:
            key = (row[2],row[1])
        points[key].append([row[0],row[3]])
    for p in points:
        op_P.write("\n" + str(p[0]) + " - " + str(p[1]) + ":\n")
        for row in points[p]:
            op_P.write(str(row[0]) + " ---> " + str(row[1]) + "\n")
54,417 | PatJByrne/msd_project | refs/heads/master | /Graph_dictionary.py |
# coding: utf-8
# In[3]:
import re
from collections import OrderedDict
import distance_from_lat_long
from copy import deepcopy
import matplotlib.pyplot as plt
#%matplotlib inline
def split_to_time(splt):
    """Convert an 'H:MM:SS' split string into a total number of seconds."""
    hours, minutes, seconds = (int(part) for part in splt.split(':'))
    return hours * 3600 + minutes * 60 + seconds
class Node(object):
    """One visited checkpoint in a team's route, linked to the checkpoints
    visited immediately before (prv) and after (nxt) it.

    Edge attributes hold the leg length in meters, computed from the
    pre-converted pixel coordinates.
    """

    def __init__(self, num, prv=None, nxt=None, splt=None, pts=None):
        self.num = num                   # checkpoint number
        self.time = split_to_time(splt)  # split time in seconds
        self.points = pts                # score value of this checkpoint
        if prv is None:
            self.prv = self.prv_edge = None
        else:
            self.prv = prv
            self.prv_edge = distance_from_lat_long.point_distance_pixel(self.num, self.prv)
        if nxt is None:
            self.nxt = self.nxt_edge = None
        else:
            self.nxt = nxt
            self.nxt_edge = distance_from_lat_long.point_distance_pixel(self.num, self.nxt)

    def set_nxt(self, nxt):
        """Link this node to the next checkpoint and compute the leg length."""
        self.nxt = nxt
        self.nxt_edge = distance_from_lat_long.point_distance_pixel(self.num, self.nxt)
def team_grapher():
    """Build an OrderedDict mapping team ID -> list of Node objects, one per
    checkpoint visit, from the scraped race split CSV."""
    f = open('Race_data_team_splits_checkpoints.csv','r')
    lines = f.readlines()[1:]
    f.close()
    team_graph = OrderedDict()
    # Shared start node: every route begins at the start/finish (point 100)
    # with a zero split; it is deep-copied per team before linking.
    ST_Node = Node(num =100,splt = '0:0:0')
    for l in range(len(lines)):
        # Strip quoting, then pull team ID, split time, and checkpoint label
        # (label arrives wrapped in parentheses, e.g. "(101)").
        line = re.sub('\"','',lines[l])
        team = line.strip().split()[0]
        splt = line.strip().split(',')[-1]
        point = re.sub('\)','',re.sub('\(','',line.strip().split(',')[1]))
        if '*' in splt:
            continue  # rows with '*' in the split are skipped entirely
        if point == 'F':
            point = 100  # finish is the start/finish point again
        elif point == 'NA':
            continue
        else:
            point = int(point)
        # Checkpoint score: 10 points for 101-110, 20 for 111-120, etc.
        # (evaluates to 0 for the finish point 100).
        pts = 10+((point-101)//10)*10
        if (team not in team_graph.keys()):
            # First row for this team: seed its route with a copy of the
            # start node linked to this checkpoint.
            team_graph[team] = []
            ST_Node.set_nxt(point)
            team_graph[team].append(deepcopy(ST_Node))
            prv_Node = ST_Node
        else:
            prv_Node = team_graph[team][-1]
            prv_Node.set_nxt(point)
        team_graph[team].append(Node(point, prv = prv_Node.num,splt = splt,pts = pts))
    return(team_graph)
# In[4]:
#sec_pt = 0
#for n,node in enumerate(team_graph['454']):
# print node.num, node.prv_edge,node.nxt_edge,node.time,node.points
# if node.num != 100:
# plt.plot(n,(float(node.time)/float(node.points))**-1,'o')
# sec_pt += node.time/float(node.points)
#print sec_pt
# In[4]:
#print '# ','<-time','->time','pts'
#sec_pt = 0
#for n,node in enumerate(team_graph['439']):
# print node.num,node.prv_edge,node.nxt_edge,node.time
# if node.num != 100:
# plt.plot(n,node.time/float(node.points),'o')
# plt.ylim(0,200)
# sec_pt += node.time/float(node.points)
#print sec_pt
# In[5]:
#for node in team_graph['275']:
# print node.num,node.prv_edge,node.nxt_edge,node.time
# In[6]:
#for node in team_graph['439']:
# print node.num,node.prv_edge,node.nxt_edge,node.time
# In[7]:
#for node in team_graph['409']:
# print node.num,node.prv_edge,node.nxt_edge,node.time
# In[ ]:
| {"/Graph_dictionary.py": ["/distance_from_lat_long.py"], "/Map_Graph_Plot.py": ["/Graph_dictionary.py", "/distance_from_lat_long.py"]} |
54,418 | PatJByrne/msd_project | refs/heads/master | /Map_Graph_Plot.py | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import Graph_dictionary
import distance_from_lat_long
from collections import OrderedDict
# Count, for every undirected checkpoint pair, how many teams travelled that
# leg; track the most-travelled count for later alpha scaling.
team_graph = Graph_dictionary.team_grapher()
edge_count = OrderedDict()
max_edge = 0
for team, course_map in team_graph.items():
    # The final node has no outgoing leg, so stop one short.
    for node in course_map[:-1]:
        low, high = sorted([node.num, node.nxt])
        edge_name = '%d-%d' % (low, high)
        edge_count[edge_name] = edge_count.get(edge_name, 0) + 1
        if edge_count[edge_name] > max_edge:
            max_edge = edge_count[edge_name]
# <codecell>
# Draw every travelled leg; line width and opacity scale with popularity.
# NOTE(review): `plt` is never imported in this file -- the notebook this was
# exported from presumably used pylab; add `import matplotlib.pyplot as plt`.
fig = plt.figure()
ax = fig.add_subplot(111,aspect = 'equal')
# Bug fix: the distance module exposes point_data_lat_long, not point_data
# (which does not exist).  TODO confirm plotting in lat/long degrees is the
# intended unit here.
(yst,xst,elev) = distance_from_lat_long.point_data_lat_long(100,'.')
for edge in edge_count.keys():
    (y1,x1,elev) = distance_from_lat_long.point_data_lat_long(int(edge.split('-')[0]),'.')
    (y2,x2,elev) = distance_from_lat_long.point_data_lat_long(int(edge.split('-')[1]),'.')
    # Shift so the start/finish checkpoint sits at the origin.
    x1 -= xst
    x2 -= xst
    y1 -= yst
    y2 -= yst
    plt.plot([x1,x2],[y1,y2],'r',linewidth = 1+edge_count[edge]%4,alpha = edge_count[edge]/float(max_edge))
    plt.plot([x1,x2],[y1,y2],'ob')
# <codecell>
# Overlay team 454's actual route in blue on the popularity map.
# Bug fix: the distance module exposes point_data_lat_long, not point_data.
for node in team_graph['454'][:-1]:
    (y1,x1,elev) = distance_from_lat_long.point_data_lat_long(node.num,'.')
    (y2,x2,elev) = distance_from_lat_long.point_data_lat_long(node.nxt,'.')
    # Shift so the start/finish checkpoint sits at the origin.
    x1 -= xst
    x2 -= xst
    y1 -= yst
    y2 -= yst
    plt.plot([x1,x2],[y1,y2],'b',linewidth =1,alpha = .5)
| {"/Graph_dictionary.py": ["/distance_from_lat_long.py"], "/Map_Graph_Plot.py": ["/Graph_dictionary.py", "/distance_from_lat_long.py"]} |
54,459 | yurifw/Alpha | refs/heads/master | /alpha.py | #!/usr/bin/python
import random
import os
def generate_random_key(size, path):
    """Generate a random key of *size* bytes and save it to *path*
    (path should include the file name).

    SECURITY NOTE: `random` is not a cryptographically secure source;
    real keys should come from os.urandom() / the `secrets` module.
    """
    # Bug fixes: randrange(0, 255) excluded the byte value 255 (off-by-one),
    # and text-mode 'w+' cannot write raw bytes on Python 3 -- open binary.
    key = bytearray(random.randrange(256) for _ in range(size))
    with open(path, 'wb') as f:
        f.write(key)
def read_file(path):
    """Read *path* as raw bytes and return them as a bytearray."""
    # Bug fix: the old text-mode read + per-character ord() loop breaks on
    # non-UTF-8 bytes under Python 3 and leaked the file handle; reading in
    # binary mode returns the same byte values in one pass.
    with open(path, 'rb') as f:
        return bytearray(f.read())
def s_box(byte):
    """Substitute *byte* through the Rijndael (AES) S-box and return the
    resulting byte (FIPS-197, Fig. 7)."""
    # Fixes: the lookup table no longer shadows the function's own name, and
    # the nibble names follow the AES convention (high nibble = row,
    # low nibble = column).  The returned values are unchanged.
    SBOX = ((0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76),
            (0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0),
            (0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15),
            (0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75),
            (0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84),
            (0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf),
            (0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8),
            (0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2),
            (0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73),
            (0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb),
            (0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79),
            (0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08),
            (0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a),
            (0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e),
            (0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf),
            (0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16))
    row = byte >> 4    # high nibble selects the table row
    col = byte & 0x0F  # low nibble selects the column
    return SBOX[row][col]
def get_block(array, n, block_size):
    """Return the n-th block_size-sized slice of *array* (n starts at 0)."""
    start = n * block_size
    return array[start:start + block_size]
def key_expand(key, size):
    """Expand *key* (a bytearray) in place until it is *size* bytes long,
    using the Rijndael key schedule, and return it.

    NOTE(review): the schedule core is applied every 16 generated bytes
    regardless of the original key length, which matches the AES-128
    schedule; AES-192/256 expansion differs -- confirm only 16-byte keys
    are used with this function.
    """
    # Rijndael round-constant table (rcon), repeated so long expansions
    # never index past the end.
    rcon = bytearray()
    rcon.extend([0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
                 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39,
                 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
                 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
                 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef,
                 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
                 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b,
                 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
                 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94,
                 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20,
                 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
                 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f,
                 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
                 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63,
                 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd,
                 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d])
    rcon_index = 1
    initial_size = len(key)
    def schedule_core(temp, r):
        # Key-schedule core: rotate the 4-byte word left by one byte ...
        aux = temp[0] # rotword
        temp[0] = temp[1]
        temp[1] = temp[2]
        temp[2] = temp[3]
        temp[3] = aux
        # ... substitute every byte through the S-box ...
        for i in range(4):
            temp[i] = s_box(temp[i])
        # ... and XOR the first byte with round constant r.
        temp[0] = temp[0] ^ rcon[r]
        return temp
    t = [0]*4 # initializing temporary bytes
    while len(key) < size:
        # t holds the last 4 generated bytes.
        t[0] = key[-4]
        t[1] = key[-3]
        t[2] = key[-2]
        t[3] = key[-1]
        # Apply the schedule core at the start of each 16-byte stretch.
        if (len(key) % 16) == 0:
            t = schedule_core(t, rcon_index)
            rcon_index = rcon_index +1
        # XOR against the word initial_size bytes back and append.
        key.append(t[0] ^ key[len(key)-initial_size])
        key.append(t[1] ^ key[len(key)-initial_size])
        key.append(t[2] ^ key[len(key)-initial_size])
        key.append(t[3] ^ key[len(key)-initial_size])
    return key
def n_alpha(xy):
    """Apply the non-linear n_alpha transformation (the S-box) to one byte."""
    substituted = s_box(xy)
    return substituted
def l_alpha(e):
    """Linear l_alpha transformation over *e* (a byte sequence whose length
    is a multiple of 8), using the binary matrix defined in Lambert's
    dissertation (appendix 12.8.1).

    Returns a bytearray of the same length as *e*, built by transforming
    each consecutive 8-byte block independently.
    """
    def transform(seq):
        # Each t[i] is one output bit: a fixed XOR of seven input bits,
        # selected by (byte index, shift amount) per the dissertation matrix.
        t = [None]*64
        t[0] = ((seq[5] >> 2) ^ (seq[5] >> 4) ^ (seq[1] >> 6) ^ (seq[0] >> 5) ^ (seq[6] >> 7) ^ (seq[3] >> 5) ^ (seq[4] >> 6)) & 1
        t[1] = ((seq[7] >> 2) ^ (seq[4] >> 2) ^ (seq[2] >> 2) ^ (seq[1] >> 5) ^ (seq[6] >> 1) ^ (seq[5] >> 6) ^ (seq[3] >> 6)) & 1
        t[2] = ((seq[3] >> 1) ^ (seq[2] >> 1) ^ (seq[3] >> 3) ^ (seq[0] >> 4) ^ (seq[1] >> 2) ^ (seq[7] >> 7) ^ (seq[5] >> 5)) & 1
        t[3] = ((seq[1] >> 4) ^ (seq[0] >> 1) ^ (seq[7] >> 6) ^ (seq[6] >> 4) ^ (seq[1] >> 2) ^ (seq[4] >> 5) ^ (seq[5] >> 4)) & 1
        t[4] = ((seq[2] >> 3) ^ (seq[5] >> 7) ^ (seq[0]) ^ (seq[4] >> 7) ^ (seq[3] >> 7) ^ (seq[1] >> 7) ^ (seq[3])) & 1
        t[5] = ((seq[6] >> 3) ^ (seq[2]) ^ (seq[4] >> 6) ^ (seq[6] >> 6) ^ (seq[7] >> 5) ^ (seq[0]) ^ (seq[3] >> 5)) & 1
        t[6] = ((seq[3] >> 2) ^ (seq[2] >> 2) ^ (seq[5] >> 6) ^ (seq[0]) ^ (seq[7] >> 6) ^ (seq[6] >> 4) ^ (seq[4] >> 7)) & 1
        t[7] = ((seq[0] >> 3) ^ (seq[6] >> 2) ^ (seq[3] >> 6) ^ (seq[4] >> 1) ^ (seq[2]) ^ (seq[1] >> 1) ^ (seq[0] >> 6)) & 1
        t[8] = ((seq[0] >> 3) ^ (seq[4] >> 3) ^ (seq[0] >> 2) ^ (seq[2] >> 7) ^ (seq[7] >> 4) ^ (seq[1] >> 5) ^ (seq[2] >> 6)) & 1
        t[9] = ((seq[3] >> 1) ^ (seq[3] >> 5) ^ (seq[0] >> 2) ^ (seq[6] >> 7) ^ (seq[4] >> 1) ^ (seq[6] >> 6) ^ (seq[1] >> 3)) & 1
        t[10] = ((seq[1] >> 4) ^ (seq[7] >> 4) ^ (seq[1] >> 3) ^ (seq[4] >> 1) ^ (seq[6]) ^ (seq[0] >> 6) ^ (seq[2])) & 1
        t[11] = ((seq[2] >> 3) ^ (seq[1] >> 5) ^ (seq[7] >> 4) ^ (seq[0] >> 4) ^ (seq[2] >> 7) ^ (seq[7] >> 6) ^ (seq[6] >> 4)) & 1
        # NOTE(review): the double shift (seq[0] >> 7 >> 7) below looks like a
        # typo for a single shift, but it is kept as-is to preserve behavior —
        # TODO confirm against the dissertation matrix.
        t[12] = ((seq[3] >> 2) ^ (seq[6] >> 7) ^ (seq[0] >> 7 >> 7) ^ (seq[3]) ^ (seq[5] >> 1) ^ (seq[2] >> 1) ^ (seq[7] >> 7)) & 1
        t[13] = ((seq[5] >> 2) ^ (seq[1] >> 1) ^ (seq[3] >> 7) ^ (seq[4] >> 7) ^ (seq[5] >> 7) ^ (seq[6] >> 1) ^ (seq[2] >> 2)) & 1
        t[14] = ((seq[4] >> 4) ^ (seq[0] >> 6) ^ (seq[1] >> 3) ^ (seq[5] >> 4) ^ (seq[1] >> 7) ^ (seq[7] >> 4) ^ (seq[5] >> 7)) & 1
        t[15] = ((seq[6] >> 3) ^ (seq[5] >> 1) ^ (seq[4]) ^ (seq[2] >> 5) ^ (seq[6]) ^ (seq[0] >> 1) ^ (seq[1] >> 7)) & 1
        t[16] = ((seq[1] >> 4) ^ (seq[0]) ^ (seq[4] >> 3) ^ (seq[2] >> 6) ^ (seq[3] >> 7) ^ (seq[7] >> 5) ^ (seq[6] >> 1)) & 1
        t[17] = ((seq[5] >> 2) ^ (seq[1]) ^ (seq[0] >> 4) ^ (seq[7] >> 7) ^ (seq[2] >> 5) ^ (seq[5] >> 1) ^ (seq[0] >> 2)) & 1
        t[18] = ((seq[5]) ^ (seq[6]) ^ (seq[5] >> 5) ^ (seq[7] >> 1) ^ (seq[4] >> 2) ^ (seq[0] >> 2) ^ (seq[6] >> 2)) & 1
        t[19] = ((seq[6] >> 3) ^ (seq[6] >> 5) ^ (seq[0] >> 5) ^ (seq[7]) ^ (seq[4] >> 2) ^ (seq[1] >> 2) ^ (seq[5] >> 4)) & 1
        t[20] = ((seq[7] >> 1) ^ (seq[5]) ^ (seq[1] >> 4) ^ (seq[0] >> 3) ^ (seq[3] >> 1) ^ (seq[7] >> 2) ^ (seq[6] >> 3)) & 1
        t[21] = ((seq[4] >> 4) ^ (seq[7] >> 7) ^ (seq[2] >> 5) ^ (seq[4]) ^ (seq[7] >> 6) ^ (seq[6] >> 2) ^ (seq[3] >> 7)) & 1
        t[22] = ((seq[3] >> 1) ^ (seq[5] >> 3) ^ (seq[2] >> 4) ^ (seq[4] >> 5) ^ (seq[0]) ^ (seq[1] >> 7) ^ (seq[7] >> 6)) & 1
        t[23] = ((seq[3] >> 2) ^ (seq[1] >> 1) ^ (seq[6]) ^ (seq[4] >> 3) ^ (seq[6] >> 2) ^ (seq[0] >> 1) ^ (seq[1] >> 6)) & 1
        t[24] = ((seq[0] >> 3) ^ (seq[1]) ^ (seq[2] >> 2) ^ (seq[5] >> 6) ^ (seq[3] >> 3) ^ (seq[6] >> 5) ^ (seq[5] >> 3)) & 1
        t[25] = ((seq[6] >> 3) ^ (seq[2] >> 1) ^ (seq[3] >> 3) ^ (seq[6] >> 2) ^ (seq[1] >> 6) ^ (seq[7] >> 4) ^ (seq[3])) & 1
        t[26] = ((seq[3] >> 2) ^ (seq[2] >> 4) ^ (seq[4] >> 2) ^ (seq[7] >> 5) ^ (seq[3] >> 7) ^ (seq[5] >> 3) ^ (seq[3] >> 6)) & 1
        t[27] = ((seq[5] >> 2) ^ (seq[1] >> 2) ^ (seq[5] >> 3) ^ (seq[4] >> 3) ^ (seq[2] >> 1) ^ (seq[4] >> 1) ^ (seq[7] >> 3)) & 1
        t[28] = ((seq[4] >> 4) ^ (seq[4] >> 5) ^ (seq[3]) ^ (seq[0] >> 4) ^ (seq[6] >> 7) ^ (seq[1] >> 5) ^ (seq[5] >> 1)) & 1
        t[29] = ((seq[5]) ^ (seq[7] >> 5) ^ (seq[4]) ^ (seq[1] >> 3) ^ (seq[2]) ^ (seq[3] >> 5) ^ (seq[0] >> 1)) & 1
        t[30] = ((seq[2] >> 3) ^ (seq[2] >> 1) ^ (seq[4] >> 2) ^ (seq[1] >> 6) ^ (seq[6] >> 1) ^ (seq[0] >> 5) ^ (seq[5] >> 3)) & 1
        t[31] = ((seq[0] >> 3) ^ (seq[3] >> 6) ^ (seq[7] >> 5) ^ (seq[4] >> 2) ^ (seq[6] >> 7) ^ (seq[0] >> 7) ^ (seq[1] >> 2)) & 1
        t[32] = ((seq[0] >> 3) ^ (seq[5] >> 1) ^ (seq[7] >> 7) ^ (seq[3] >> 4) ^ (seq[0] >> 1) ^ (seq[4]) ^ (seq[6] >> 4)) & 1
        t[33] = ((seq[5] >> 2) ^ (seq[0] >> 1) ^ (seq[6]) ^ (seq[2] >> 4) ^ (seq[3] >> 3) ^ (seq[5] >> 6) ^ (seq[7])) & 1
        t[34] = ((seq[5]) ^ (seq[1] >> 7) ^ (seq[1] >> 1) ^ (seq[0] >> 4) ^ (seq[7]) ^ (seq[6] >> 5) ^ (seq[4] >> 7)) & 1
        t[35] = ((seq[5]) ^ (seq[3] >> 3) ^ (seq[4] >> 3) ^ (seq[2] >> 2) ^ (seq[1] >> 1) ^ (seq[6] >> 7) ^ (seq[5] >> 6)) & 1
        t[36] = ((seq[4] >> 4) ^ (seq[0] >> 2) ^ (seq[6] >> 5) ^ (seq[4] >> 1) ^ (seq[1]) ^ (seq[3] >> 5) ^ (seq[2] >> 6)) & 1
        t[37] = ((seq[3] >> 1) ^ (seq[5] >> 7) ^ (seq[6] >> 1) ^ (seq[3] >> 4) ^ (seq[5] >> 6) ^ (seq[1] >> 6) ^ (seq[7] >> 3)) & 1
        t[38] = ((seq[7] >> 2) ^ (seq[0] >> 5) ^ (seq[7] >> 6) ^ (seq[4] >> 5) ^ (seq[0]) ^ (seq[1]) ^ (seq[6] >> 4)) & 1
        t[39] = ((seq[3] >> 2) ^ (seq[5] >> 5) ^ (seq[2] >> 6) ^ (seq[0] >> 4) ^ (seq[7] >> 4) ^ (seq[3] >> 5) ^ (seq[4] >> 6)) & 1
        t[40] = ((seq[0] >> 3) ^ (seq[4] >> 6) ^ (seq[6] >> 6) ^ (seq[2] >> 7) ^ (seq[7] >> 3) ^ (seq[0] >> 4) ^ (seq[7] >> 1)) & 1
        t[41] = ((seq[4] >> 4) ^ (seq[1] >> 5) ^ (seq[0] >> 7) ^ (seq[6] >> 4) ^ (seq[3] >> 6) ^ (seq[4] >> 5) ^ (seq[2] >> 7)) & 1
        t[42] = ((seq[2] >> 5) ^ (seq[5] >> 4) ^ (seq[2] >> 7) ^ (seq[6] >> 5) ^ (seq[2]) ^ (seq[6] >> 1) ^ (seq[0] >> 6)) & 1
        t[43] = ((seq[6] >> 3) ^ (seq[5] >> 3) ^ (seq[6] >> 4) ^ (seq[1] >> 3) ^ (seq[5] >> 5) ^ (seq[7] >> 1) ^ (seq[2] >> 2)) & 1
        t[44] = ((seq[3] >> 1) ^ (seq[4] >> 3) ^ (seq[6] >> 2) ^ (seq[2] >> 7) ^ (seq[7]) ^ (seq[5] >> 4) ^ (seq[1] >> 1)) & 1
        t[45] = ((seq[5]) ^ (seq[6] >> 6) ^ (seq[3] >> 4) ^ (seq[0] >> 7) ^ (seq[4] >> 6) ^ (seq[1] >> 2) ^ (seq[2] >> 6)) & 1
        t[46] = ((seq[7] >> 2) ^ (seq[3] >> 4) ^ (seq[5] >> 3) ^ (seq[2]) ^ (seq[4]) ^ (seq[5] >> 5) ^ (seq[2] >> 1)) & 1
        t[47] = ((seq[2] >> 3) ^ (seq[4] >> 4) ^ (seq[5] >> 2) ^ (seq[3] >> 2) ^ (seq[2] >> 5) ^ (seq[4] >> 5) ^ (seq[6] >> 2)) & 1
        t[48] = ((seq[4] >> 4) ^ (seq[3] >> 3) ^ (seq[0]) ^ (seq[2]) ^ (seq[6] >> 6) ^ (seq[4] >> 2) ^ (seq[5] >> 5)) & 1
        t[49] = ((seq[5] >> 2) ^ (seq[7] >> 1) ^ (seq[2] >> 6) ^ (seq[0] >> 2) ^ (seq[6] >> 5) ^ (seq[3]) ^ (seq[7] >> 5)) & 1
        t[50] = ((seq[7] >> 2) ^ (seq[3] >> 7) ^ (seq[0] >> 1) ^ (seq[7] >> 4) ^ (seq[1] >> 7) ^ (seq[4] >> 7) ^ (seq[1] >> 1)) & 1
        t[51] = ((seq[1] >> 4) ^ (seq[6]) ^ (seq[1] >> 3) ^ (seq[0] >> 5) ^ (seq[5] >> 7) ^ (seq[3] >> 5) ^ (seq[1] >> 6)) & 1
        t[52] = ((seq[2] >> 3) ^ (seq[2] >> 4) ^ (seq[3] >> 6) ^ (seq[1]) ^ (seq[0] >> 2) ^ (seq[7] >> 7) ^ (seq[5] >> 4)) & 1
        t[53] = ((seq[5]) ^ (seq[1]) ^ (seq[6] >> 7) ^ (seq[7] >> 3) ^ (seq[0] >> 5) ^ (seq[4] >> 6) ^ (seq[2] >> 1)) & 1
        t[54] = ((seq[3] >> 1) ^ (seq[7] >> 5) ^ (seq[0] >> 6) ^ (seq[4] >> 6) ^ (seq[2] >> 6) ^ (seq[3]) ^ (seq[5] >> 1)) & 1
        t[55] = ((seq[2] >> 5) ^ (seq[1] >> 3) ^ (seq[7] >> 1) ^ (seq[3] >> 4) ^ (seq[4] >> 1) ^ (seq[1] >> 5) ^ (seq[0] >> 5)) & 1
        t[56] = ((seq[6] >> 3) ^ (seq[5] >> 6) ^ (seq[0] >> 6) ^ (seq[7] >> 6) ^ (seq[3] >> 4) ^ (seq[4] >> 3) ^ (seq[2] >> 4)) & 1
        t[57] = ((seq[1] >> 4) ^ (seq[1] >> 5) ^ (seq[3]) ^ (seq[4] >> 7) ^ (seq[5] >> 5) ^ (seq[2] >> 2) ^ (seq[7])) & 1
        t[58] = ((seq[3] >> 2) ^ (seq[7] >> 3) ^ (seq[3] >> 4) ^ (seq[1] >> 2) ^ (seq[4] >> 5) ^ (seq[6] >> 6) ^ (seq[1])) & 1
        t[59] = ((seq[1] >> 4) ^ (seq[7] >> 7) ^ (seq[0] >> 7) ^ (seq[6] >> 6) ^ (seq[1] >> 6) ^ (seq[4]) ^ (seq[2] >> 4)) & 1
        t[60] = ((seq[7] >> 2) ^ (seq[2] >> 4) ^ (seq[6]) ^ (seq[4] >> 7) ^ (seq[3] >> 7) ^ (seq[0] >> 7) ^ (seq[7] >> 1)) & 1
        t[61] = ((seq[2] >> 3) ^ (seq[7] >> 3) ^ (seq[0] >> 6) ^ (seq[4] >> 1) ^ (seq[5] >> 1) ^ (seq[6] >> 5) ^ (seq[7])) & 1
        t[62] = ((seq[2] >> 5) ^ (seq[2] >> 3) ^ (seq[5] >> 7) ^ (seq[3] >> 3) ^ (seq[7]) ^ (seq[4]) ^ (seq[1] >> 7)) & 1
        t[63] = ((seq[7] >> 2) ^ (seq[7] >> 3) ^ (seq[5] >> 7) ^ (seq[2] >> 7) ^ (seq[3] >> 6) ^ (seq[0] >> 7) ^ (seq[6] >> 1)) & 1
        # Repack the 64 output bits into 8 bytes, MSB first.
        s = [None]*8
        s[0] = (t[0] << 7) | (t[1] << 6) | (t[2] << 5) | (t[3] << 4) | (t[4] << 3) | (t[5] << 2) | (t[6] << 1) | (t[7])
        s[1] = (t[8] << 7) | (t[9] << 6) | (t[10] << 5) | (t[11] << 4) | (t[12] << 3) | (t[13] << 2) | (t[14] << 1) | (t[15])
        s[2] = (t[16] << 7) | (t[17] << 6) | (t[18] << 5) | (t[19] << 4) | (t[20] << 3) | (t[21] << 2) | (t[22] << 1) | (t[23])
        s[3] = (t[24] << 7) | (t[25] << 6) | (t[26] << 5) | (t[27] << 4) | (t[28] << 3) | (t[29] << 2) | (t[30] << 1) | (t[31])
        s[4] = (t[32] << 7) | (t[33] << 6) | (t[34] << 5) | (t[35] << 4) | (t[36] << 3) | (t[37] << 2) | (t[38] << 1) | (t[39])
        s[5] = (t[40] << 7) | (t[41] << 6) | (t[42] << 5) | (t[43] << 4) | (t[44] << 3) | (t[45] << 2) | (t[46] << 1) | (t[47])
        s[6] = (t[48] << 7) | (t[49] << 6) | (t[50] << 5) | (t[51] << 4) | (t[52] << 3) | (t[53] << 2) | (t[54] << 1) | (t[55])
        s[7] = (t[56] << 7) | (t[57] << 6) | (t[58] << 5) | (t[59] << 4) | (t[60] << 3) | (t[61] << 2) | (t[62] << 1) | (t[63])
        return s
    result = bytearray()
    # BUGFIX: use integer division (//) — true division yields a float on
    # Python 3 and range() would raise TypeError; identical on Python 2.
    for i in range(len(e) // 8):  # 8 is the amount of bytes this function transforms at a time
        result.extend(transform(get_block(e, i, 8)))
    return result
def encipher_block(text, key, rounds, block_size, encipher):
    """Encipher or decipher one block in place (encipher=True to encipher,
    False to decipher), using the given unexpanded *key*; the algorithm
    performs *rounds* Feistel-style rounds. For now the only block_size
    supported is 16, and rounds should also be 16.

    NOTE(review): key_expand() mutates *key* in place, so the caller's key
    object grows on the first call — TODO confirm this is intended.
    COMPATIBILITY: the n_alpha bugfix below changes the cipher output, so
    data enciphered by the old (buggy) code will not decipher with this one;
    encipher/decipher with this version remain mutual inverses.
    """
    expanded_key = key_expand(key, rounds * block_size)
    # One round key (block_size bytes) per round; deciphering reverses the
    # round-key order. (The original [[None]*bs]*rounds aliased one inner
    # list, which was harmless only because every slot is rebound below.)
    key = [None] * rounds
    for i in range(rounds):
        if encipher:
            key[i] = get_block(expanded_key, i, block_size)
        else:
            key[rounds-1-i] = get_block(expanded_key, i, block_size)
    # BUGFIX: integer division so half_block stays an int on Python 3
    # (identical result on Python 2).
    half_block = block_size // 2
    # Initializing left[0] and right[0].
    left_prev = [0]*half_block
    left = [0]*half_block
    right_prev = [0]*half_block
    right = [0]*half_block
    temporary = [0]*half_block
    for j in range(half_block):
        left_prev[j] = text[j]                 # left zero
        right_prev[j] = text[j + half_block]   # right zero
    # Starting alpha iterations (rounds).
    for i in range(rounds):
        # left[i]
        for j in range(half_block):
            left[j] = right_prev[j] ^ key[i][j]  # key[j] = Ke[j]
        # right[i]
        for j in range(half_block):
            temporary[j] = right_prev[j]
        if not encipher:
            for j in range(half_block):
                temporary[j] = temporary[j] ^ key[i][j]  # if deciphering, do a xor with Ke
        right = l_alpha(temporary)
        for j in range(half_block):
            temporary[j] = (right[j] ^ key[i][j+half_block])  # Kd[j] == key[j+8]
        for j in range(half_block):
            # BUGFIX: the original discarded n_alpha's return value
            # (byte = temporary[j]; n_alpha(byte); temporary[j] = byte),
            # silently skipping the S-box substitution step.
            temporary[j] = n_alpha(temporary[j])
        for j in range(half_block):
            right[j] = (left_prev[j] ^ temporary[j])
            left_prev[j] = left[j]
            right_prev[j] = right[j]
    for j in range(half_block):  # text = right | left (final swap)
        text[j] = right[j]
        text[j+half_block] = left[j]
    return text
def pad(block, size):
    """Pad *block* in place up to the next multiple of *size* bytes.

    Every added byte stores the pad count (PKCS#7-like). A block whose
    length is already an exact multiple of *size* is left untouched.
    """
    remainder = len(block) % size
    if remainder:
        fill = size - remainder
        block.extend([fill] * fill)
def unpad(padded_block):
    """Strip, in place, the trailing pad bytes written by pad().

    Heuristic: padding is assumed present when the last two bytes are equal
    (each pad byte holds the pad count). Known limitation: real data that
    happens to end with two equal bytes is wrongly truncated.
    """
    count = padded_block[-1]
    if padded_block[-1] == padded_block[-2]:
        for _ in range(count):
            padded_block.pop()
def encipher(input_file, key, rounds, block_size, enciphering, output_file):
    """Encipher or decipher a whole file, independent of its size.

    When enciphering, the original file extension is stored (padded) in the
    first block so it can be restored on decryption; *input_file* must be
    the absolute path including name and extension. When deciphering,
    *output_file* should be a path without extension — the stored extension
    is re-appended here.

    NOTE(review): despite the original comment, only the extension (not the
    file name) is preserved. Assumes the os module and read_file() are
    available at module level — TODO confirm. `bytes` shadows the builtin.
    """
    bytes = bytearray()
    if enciphering: # storing file extension in first block
        # Take everything after the last path separator, then after the
        # first dot, as the extension to preserve.
        file_name = input_file[input_file.rfind(os.sep)+1:]
        extension = file_name[file_name.find(".")+1:]
        bytes = bytearray()
        for c in extension:
            bytes.append(ord(c))
        pad(bytes, block_size)
    bytes.extend(read_file(input_file))
    pad(bytes, block_size)
    result = bytearray()
    # NOTE(review): true division — Python 2 only; on Python 3 this needs
    # `//` (range() rejects floats). TODO fix when migrating.
    for i in range(len(bytes)/block_size):
        result.extend(encipher_block(get_block(bytes, i, block_size), key, rounds, block_size, enciphering))
    if not enciphering: # retrieving original extension from first block
        extension_bytes = get_block(result, 0, block_size)
        unpad(extension_bytes)
        extension = ""
        for b in extension_bytes:
            extension += chr(b)
        output_file = output_file + "." + extension
        result = result[block_size:] # ignoring first block because it just contained the extension, not relevant info
        unpad(result)
    # NOTE(review): writing a bytearray to a text-mode ("w+") file works on
    # Python 2 only; Python 3 would need mode "wb". TODO confirm target version.
    f = open(output_file, "w+")
    f.write(result)
    f.close()
"""
bs = 32 #block size
print "plain text: ",
plain_text=[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
print plain_text
print "key: ",
key=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
print key
print "cipher text: ",
cipher_text = encipher_block(plain_text, key, 16, bs, True)
print cipher_text
print "deciphered: ",
deciphered = encipher_block(cipher_text, key, 16, bs, False)
print deciphered
""" | {"/gui.py": ["/alpha.py"]} |
54,460 | yurifw/Alpha | refs/heads/master | /gui.py | #!/usr/bin/python
from Tkconstants import RIGHT, LEFT, RAISED, X, BOTTOM, TOP, END
from Tkinter import Tk, Frame, BOTH, Entry, BooleanVar, LabelFrame
import tkFileDialog
from ttk import Button, Style, Label, Combobox, Radiobutton
import alpha
class Example(Frame):
    """Main application frame for the Alpha cipher GUI (Python 2 / Tkinter)."""
    # Selected key-input method ('Decimal', 'Hexadecimal' or 'Arquivo');
    # class-level default, overwritten per instance by the combobox handler.
    input_method = ""
    def __init__(self, parent):
        """Build the frame inside *parent* (the Tk root) and lay out widgets."""
        Frame.__init__(self, parent, background="white")
        self.parent = parent
        self.init_gui()
    def init_gui(self):
        """Create all widgets and wire up their event callbacks.

        The nested handlers close over the widget variables defined further
        down; this works because they are only invoked after init_gui() has
        finished building the UI.
        """
        #Events
        def change_text_button():
            # Keep the action button's caption in sync with the selected mode.
            if encrypt.get():
                btn_encrypt.config(text="Criptografar")
            else:
                btn_encrypt.config(text="Descriptografar")
        def generate_key():
            # Ask where to save, then write a fresh random 16-byte key there.
            directory = tkFileDialog.asksaveasfilename(defaultextension='.key')
            alpha.generate_random_key(16, directory)
        def set_input_key_method(event):
            # Remember the chosen input method; for 'Arquivo' immediately
            # let the user pick the key file and show its path.
            self.input_method = event.widget.get()
            txt_chave.delete(0, END)
            if self.input_method == "Arquivo":
                txt_chave.insert(0, tkFileDialog.askopenfilename())
        def set_arquivo_entrada():
            # Pick the input file and show its path in the entry field.
            txt_arquivo_entrada.delete(0, END)
            txt_arquivo_entrada.insert(0, tkFileDialog.askopenfilename())
        def set_arquivo_saida():
            # Pick the output file and show its path in the entry field.
            txt_arquivo_saida.delete(0, END)
            txt_arquivo_saida.insert(0, tkFileDialog.asksaveasfilename())
        def run_alpha():
            # NOTE(review): assumes the key field holds a file path (the
            # 'Arquivo' method); Decimal/Hexadecimal input is not parsed
            # here — TODO confirm intended behavior.
            key = alpha.read_file(txt_chave.get())
            alpha.encipher(txt_arquivo_entrada.get(), key, 16, 16, encrypt.get(), txt_arquivo_saida.get())
        self.parent.title("Alpha Cipher")
        self.style = Style()
        self.style.theme_use("default")
        # Encrypt / decrypt mode radio buttons.
        frame_radio = Frame(self, border=1)
        frame_radio.pack(fill=X, expand=1, side=TOP)
        encrypt = BooleanVar()
        rad_encrypt = Radiobutton(frame_radio, text="Criptografar", variable=encrypt, value=True, command=change_text_button)
        rad_encrypt.pack(fill=BOTH, expand=1, side=RIGHT)
        rad_decrypt = Radiobutton(frame_radio, text="Descriptografar", variable=encrypt, value=False, command=change_text_button)
        rad_decrypt.pack(fill=BOTH, expand=1, side=RIGHT)
        # Key generation button.
        btn_gerar_chave = Button(self, text="Gerar Chave", command=generate_key)
        btn_gerar_chave.pack(fill=X, expand=1)
        # Key input method selector (defaults to 'Hexadecimal', index 1).
        frame_label = Frame(self, border=1)
        frame_label.pack(fill=X, expand=1, side=TOP)
        lbl_metodo = Label(frame_label, text="Metodo de entrada da chave:")
        lbl_metodo.pack(fill=BOTH, expand=1, side=LEFT)
        cbo_entrada = Combobox(frame_label, state='readonly', values=['Decimal', 'Hexadecimal', 'Arquivo'])
        cbo_entrada.bind('<<ComboboxSelected>>', set_input_key_method)
        cbo_entrada.current(1)
        cbo_entrada.pack(fill=BOTH, expand=1, side=RIGHT)
        # Key entry field.
        txt_chave = Entry(self)
        txt_chave.pack(fill=X, expand=1)
        # Input file chooser.
        frame_entrada = LabelFrame(self, text="Arquivo de Entrada")
        frame_entrada.pack(fill=BOTH, expand=1, side=TOP)
        txt_arquivo_entrada = Entry(frame_entrada)
        txt_arquivo_entrada.pack(fill=X, expand=1, side=LEFT)
        btn_arquivo_entrada = Button(frame_entrada, text="Pesquisar", width=9, command=set_arquivo_entrada)
        btn_arquivo_entrada.pack(fill=X, expand=0, side=RIGHT)
        # Output file chooser.
        frame_saida = LabelFrame(self, text="Arquivo de Saida")
        frame_saida.pack(fill=BOTH, expand=1, side=TOP)
        txt_arquivo_saida = Entry(frame_saida)
        txt_arquivo_saida.pack(fill=X, expand=1, side=LEFT)
        btn_arquivo_saida = Button(frame_saida, text="Pesquisar", width=9, command=set_arquivo_saida)
        btn_arquivo_saida.pack(fill=X, expand=0, side=RIGHT)
        # Run button; its caption is kept in sync by change_text_button().
        btn_encrypt = Button(self, text="Descriptografar", command=run_alpha)
        btn_encrypt.pack(fill=X, expand=1)
        self.pack(fill=X, expand=1)
    def center_window(self):
        """Center the fixed 290x200 window on the screen."""
        w = 290
        h = 200
        sw = self.parent.winfo_screenwidth()
        sh = self.parent.winfo_screenheight()
        x = (sw - w)/2
        y = (sh - h)/2
        self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
def main():
    """Create the Tk root, attach the application frame and start the loop."""
    root = Tk()
    window = Example(root)
    window.center_window()
    root.mainloop()
if __name__ == '__main__':
main()
| {"/gui.py": ["/alpha.py"]} |
54,487 | PiCindy/Data-Science | refs/heads/main | /extraction.py | import re
import nltk
import wptools
import wikipedia
import pandas as pd
from SPARQLWrapper import SPARQLWrapper, JSON
def create_list(q, k):
    '''
    Build a list of person identifiers for one occupation category.
    Input:
        q (str): Wikidata occupation identifier
        k (int): number of persons wanted
    Output:
        List of person ids (with some surplus, since some persons will
        later be discarded for missing information)
    '''
    # SPARQL: humans (P31 = Q5) with the requested occupation (P106).
    query = "select distinct ?item where {?item wdt:P31 wd:%s; wdt:P106 wd:%s.}" % ("Q5", q) if False else "select distinct ?item where {?item wdt:P31 wd:Q5; wdt:P106 wd:%s.}" %(q)
    endpoint = SPARQLWrapper("http://query.wikidata.org/sparql", agent='sparqlwrapper 1.8.5 (rd-flib.github.io/sparqlwrapper)')
    endpoint.setQuery(query)
    endpoint.setReturnFormat(JSON)
    response = endpoint.query().convert()
    # Keep k plus a margin so enough persons with full data remain.
    limit = k + max(k // 2, 10)
    identifiers = []
    for binding in response["results"]["bindings"][:limit]:
        identifiers.append(binding['item']['value'].split('/')[-1])
    return identifiers
def title_desc(identifier):
    '''
    Fetch the Wikidata page for a person and return its title and description.
    Input:
        identifier (str): person id
    Output:
        (title, description) of the page
    '''
    wd_page = wptools.page(wikibase=identifier)
    wd_page.get_wikidata()
    info = wd_page.data
    return info['title'], info['description']
def create_data(persons, category, k, n):
    '''
    Collect one data row per person: title, category, type, description
    and the first n summary sentences.
    Input:
        persons (list): list of person ids
        category (str): category of the list
        k (int): number of persons per category
        n (int): number of sentences per person
    Output:
        List of [title, category, type, description, sentences] rows
    '''
    data = []
    # Map the occupation category to its coarse type label
    # ('A' for the artistic occupations, 'Z' for the others).
    if category in ('singer', 'writer', 'painter'):
        t = 'A'
    elif category in ('architect', 'politician', 'mathematician'):
        t = 'Z'
    else:
        t = ''
    # Walk the candidate list until k usable persons were collected.
    i = 0
    while len(data) < k and i < len(persons):
        p = persons[i]
        i += 1
        try:
            # Getting title and description of current person.
            title, desc = title_desc(p)
            # Accessing the person's Wikipedia page.
            page = wikipedia.page(title, auto_suggest=False)
            # Removing section names and line breaks from the page content.
            summary = re.sub('==.+==', '', page.content).replace('\n', ' ')
            # Keeping the first n sentences, joined back into one string.
            sentences = ' '.join(nltk.sent_tokenize(summary)[:n])
            data.append([title, category, t, desc, sentences])
        except (wikipedia.exceptions.PageError, wikipedia.DisambiguationError, LookupError):
            # Skip persons whose page is missing, ambiguous, or whose text
            # cannot be tokenized — we cannot get all needed elements.
            continue
    return data
def extraction(k=30, n=5):
    '''
    Corpus extraction: k persons per category, n sentences per person,
    written to data/data.csv.
    Parameters:
        k (int): number of persons per category - default value: 30
        n (int): number of sentences per person - default value: 5
    '''
    # Wikidata occupation identifiers, aligned with the category names below.
    qids = ["Q177220", "Q36180", "Q1028181", "Q42973", "Q82955", "Q170790"]
    categories = ['singer', 'writer', 'painter', 'architect', 'politician', 'mathematician']
    # Phase 1: fetch candidate person ids for every category.
    variables = [create_list(qid, k) for qid in qids]
    # Phase 2: gather one row per usable person.
    data = []
    for v, c in zip(variables, categories):
        data.extend(create_data(v, c, k, n))
    # Store everything as a dataframe / csv file.
    df = pd.DataFrame(data, columns = ['person', 'category', 'type', 'description', 'text'])
    df.to_csv('data/data.csv', index=False)
if __name__ == "__main__":
# Running the extraction function
extraction()
| {"/main.py": ["/extraction.py", "/clustering.py", "/preprocessing.py", "/classification.py"]} |
54,488 | PiCindy/Data-Science | refs/heads/main | /clustering.py | import pandas as pd
from nltk import word_tokenize
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import AgglomerativeClustering
def clustering(data, n, m):
    """Run agglomerative clustering on vectorized texts.

    Input:
        data (Series): serie of preprocessed texts
        n (int): number of clusters
        m (str): representation method ('tf-idf', 'token frequency' or 'tokens')
    Output:
        Predicted labels of clusters, matrix of clusters, number of clusters
    Raises:
        ValueError: if m is not one of the three supported methods
        (the original code left `vectorizer` unbound and crashed with a
        confusing NameError instead).
    """
    if m == 'tf-idf':
        # TF-IDF representation, 8000 features, unigrams to trigrams.
        vectorizer = TfidfVectorizer(max_features=8000,
                                     use_idf=True,
                                     stop_words='english',
                                     tokenizer=word_tokenize,
                                     ngram_range=(1, 3))
    elif m == 'token frequency':
        # Raw token counts.
        vectorizer = CountVectorizer(binary=False)
    elif m == 'tokens':
        # Binary token presence/absence.
        vectorizer = CountVectorizer(binary=True)
    else:
        raise ValueError("Unknown representation method: %r" % (m,))
    # Fit the vectorizer and densify the sparse matrix (AgglomerativeClustering
    # requires a dense input).
    x_count = vectorizer.fit_transform(data)
    matrix = x_count.todense()
    # Agglomerative clustering gave better results than KMeans in earlier tests.
    ac = AgglomerativeClustering(n_clusters=n, affinity='euclidean', memory=None, connectivity=None,
                                 compute_full_tree='auto', linkage='ward', distance_threshold=None,
                                 compute_distances=False)
    ac.fit(matrix)
    pred_labels = ac.labels_
    return pred_labels, matrix, n
def scores(data, pred_labels, matrix, n):
    """Compute evaluation scores for one clustering run.

    Output: (silhouette, homogeneity, completeness, v_measure, adjusted_rand);
    the four extrinsic scores are None when n matches no known labelling.
    """
    # Silhouette is intrinsic (no gold labels needed) — always computed.
    sil = metrics.silhouette_score(matrix, pred_labels, sample_size=1000)
    # 6 clusters correspond to the category labels, 2 to the A/Z type labels;
    # any other count has no reference labelling.
    label_column = {6: "category", 2: "type"}.get(n)
    if label_column is None:
        return sil, None, None, None, None
    labels = data[label_column]
    homo = metrics.homogeneity_score(labels, pred_labels)
    compl = metrics.completeness_score(labels, pred_labels)
    vm = metrics.v_measure_score(labels, pred_labels)
    rand = metrics.adjusted_rand_score(labels, pred_labels)
    return sil, homo, compl, vm, rand
def visualization(data, pred_labels, matrix, n):
    """Print the intrinsic and extrinsic scores for one clustering run."""
    sil, homo, compl, vm, rand = scores(data, pred_labels, matrix, n)
    report = [("Intrinsic scores:",),
              ("Silhouette coefficient:", sil),
              ("Extrinsic scores:",),
              ("Homogeneity:", homo),
              ("Completeness:", compl),
              ("V-measure:", vm),
              ("Adjusted Rand index:", rand)]
    for line in report:
        print(*line)
def main(data):
    """Run clustering for every (method, cluster-count) combination and
    print the evaluation scores for each."""
    for m in ('tf-idf', 'token frequency', 'tokens'):
        for c in (2, 6):
            # Announce which configuration is being evaluated.
            print(f'Clustering results using {c} clusters and method {m}')
            pred_labels, matrix, n = clustering(data["processed_text"], c, m)
            visualization(data, pred_labels, matrix, n)
            # Blank line to separate the different runs.
            print()
# Launch the whole program
if __name__ == "__main__":# Importing the data to be used as input
data = pd.read_csv('data/processed_data.csv', sep=',')
main(data)
| {"/main.py": ["/extraction.py", "/clustering.py", "/preprocessing.py", "/classification.py"]} |
54,489 | PiCindy/Data-Science | refs/heads/main | /main.py | import extraction
import clustering
import preprocessing
import classification
import pandas as pd
import argparse
if __name__ == "__main__":
    # Command-line interface: toggle the clustering / classification steps
    # and optionally re-run the extraction with custom parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument('--classification', dest='classification', action='store_true')
    parser.add_argument('--no-classification', dest='classification', action='store_false')
    parser.add_argument('--clustering', dest='clustering', action='store_true')
    parser.add_argument('--no-clustering', dest='clustering', action='store_false')
    parser.set_defaults(classification=True, clustering=True)
    parser.add_argument('-p', '--parameters', type=int, nargs='+', help='Choose nb of persons per category and nb of sentences per person')
    args = parser.parse_args()
    # Optionally regenerate data/data.csv with the requested sizes.
    if args.parameters:
        extraction.extraction(args.parameters[0], args.parameters[1])
    # BUGFIX: read data.csv only AFTER the optional extraction step, so
    # preprocessing runs on the freshly generated data instead of a stale
    # dataframe loaded before extraction rewrote the file.
    data = pd.read_csv('data/data.csv', sep=',')
    preprocessing.main(data)
    processed_data = pd.read_csv('data/processed_data.csv', sep=',')
    if args.clustering:
        clustering.main(processed_data)
    if args.classification:
        classification.main(processed_data)
| {"/main.py": ["/extraction.py", "/clustering.py", "/preprocessing.py", "/classification.py"]} |
54,490 | PiCindy/Data-Science | refs/heads/main | /preprocessing.py | import nltk
import pandas as pd
from nltk.corpus import stopwords
def preprocessing(text):
    """
    Application of all preprocessing methods on the text: tokenization,
    lowercasing and English stop-word removal.
    Input:
        text (string): a Wikipedia summary or Wikidata description
    Output:
        processed: the text after preprocessing (tokens joined by spaces)
    """
    # Tokenize and lowercase.
    tokens = [token.lower() for token in nltk.word_tokenize(text)]
    # PERF: use a set for O(1) stop-word membership instead of scanning
    # the ~180-word list once per token.
    en_stopwords = set(stopwords.words('english'))
    # Drop stop words and return the remaining tokens as one string.
    return ' '.join(token for token in tokens if token not in en_stopwords)
def main(df):
    """Apply preprocessing to the texts and descriptions of *df* and write
    the result to data/processed_data.csv (column order preserved)."""
    columns = {
        'person': df['person'],
        'text': df['text'],
        # Preprocessed version of the Wikipedia text.
        'processed_text': df['text'].apply(preprocessing),
        'description': df['description'],
        # Preprocessed version of the Wikidata description.
        'processed_description': df['description'].apply(preprocessing),
        'type': df['type'],
        'category': df['category'],
    }
    pd.DataFrame(columns).to_csv('data/processed_data.csv', index=False)
if __name__ == "__main__":
# Importing data
df = pd.read_csv('data/data.csv', sep=',')
main(df)
| {"/main.py": ["/extraction.py", "/clustering.py", "/preprocessing.py", "/classification.py"]} |
54,491 | PiCindy/Data-Science | refs/heads/main | /classification.py | import nltk
import pandas as pd
import sklearn
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer # import tf-idf vectorizer from sklearn
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
def get_classification_results(y_gold, y_pred):
    """
    Compute classification metrics from true and predicted labels.
    Input:
        y_gold (Array-like of ints): true values from the dataset
        y_pred (Array-like of ints): predicted values from the algorithm
    Output:
        (accuracy, confusion matrix, classification report) — the report
        string bundles precision, recall and F1 score per class.
    """
    return (accuracy_score(y_gold, y_pred),
            confusion_matrix(y_gold, y_pred),
            classification_report(y_gold, y_pred))
def visualize_classification_results(name, tested_on, y_gold, y_pred):
    """
    Print classification metrics for one algorithm/target combination.
    Input:
        name (String): name of the algorithm tested
        tested_on (String): what was predicted (categories, types, ...)
        y_gold (Array-like of ints): true values from the dataset
        y_pred (Array-like of ints): predicted values from the algorithm
    """
    print(f"Classification results of testing {name} on {tested_on}.\n")
    acc, matrix, report = get_classification_results(y_gold, y_pred)
    for label, value in (("Accuracy:", acc),
                         ("Confusion matrix:\n", matrix),
                         ("Classification report:\n", report)):
        print(label, value)
    print("\n")
def run_classification(data):
    """
    Train and evaluate several classifiers, printing the results.

    TF-IDF features are built from the processed texts; the train/test
    split is stratified on the category so both datasets stay balanced.
    Each algorithm is fitted and evaluated on both the category and the
    A/Z type labels.
    Input:
        data (Dataframe): dataframe with labels "processed_text",
        "category" and "type"
    """
    # TF-IDF representation: 8000 features, unigrams to trigrams.
    tfidf_vectorizer = TfidfVectorizer(max_features=8000,
                                       use_idf=True,
                                       stop_words='english',
                                       tokenizer=nltk.word_tokenize,
                                       ngram_range=(1, 3))
    X_tfidf = tfidf_vectorizer.fit_transform(data["processed_text"])
    # Both prediction targets.
    y_cat = data['category']
    y_type = data['type']
    # Single split shared by both targets, stratified on the category.
    X_train, X_test, y_cat_train, y_cat_test, y_type_train, y_type_test = train_test_split(
        X_tfidf, y_cat, y_type, test_size=0.3, stratify=y_cat)
    classifiers = [SGDClassifier(max_iter=1000, tol=1e-3),
                   SVC(gamma='auto'),
                   MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(10, 5), random_state=1)]
    targets = ((y_cat_train, y_cat_test, "categories"),
               (y_type_train, y_type_test, "types"))
    for clf in classifiers:
        for y_train, y_test, target_name in targets:
            clf.fit(X_train, y_train)
            predictions = clf.predict(X_test)
            visualize_classification_results(clf.__class__.__name__, target_name, y_test, predictions)
def main(data):
    """Entry point: run the classification experiments on *data*."""
    run_classification(data)
if __name__ == "__main__":
# Importing the data to be used as input
data = pd.read_csv('data/processed_data.csv')
main(data)
| {"/main.py": ["/extraction.py", "/clustering.py", "/preprocessing.py", "/classification.py"]} |
54,492 | Delsoc/PCA2 | refs/heads/master | /main.py | #from docutils.nodes import paragraph
import Init
import stimulus_activation
import unit_adaption
import numpy as np
import UnitSpecificDimensionLineFitting
import matplotlib.pyplot as plt
def main():
    """Run the full adaptive-PCA experiment.

    Performs `repetitions` independent runs; each run re-initialises the
    parameters/unit state and iterates once over every data point, recording
    the unit's output dimensionality per iteration into `outDimTotal`.
    """
    # 1: PREALLOCATION AND INITIALIZATION
    repetitions = 10
    parameters, units = Init.initialize()
    # Benchmark matrix: one row of per-iteration output dimensions per repetition.
    # (Bug fix: a stray, discarded `np.zeros((repetitions, parameters.rows))`
    # statement was removed, and the results below are now actually stored.)
    outDimTotal = np.zeros((repetitions, parameters.rows))
    # Main loop over independent repetitions
    for g in range(0, repetitions):
        print("g_loop")
        # The MATLAB original called rng(g) here for reproducibility; the port
        # deliberately omits it (see original comment).
        # Re-initialise parameters and PCA state for this repetition.
        parameters, units = Init.initialize()
        outDim = np.zeros((parameters.T, 1))
        for loop in range(0, parameters.T):
            # 2: UNIT STIMULUS AND ACTIVATION
            parameters, units = stimulus_activation.stimulus_activation(parameters, units)
            # 3: UNIT ADAPTATION
            parameters, units = unit_adaption.unit_adaption(parameters, units)
            # 4: ADJUSTING UNIT SPECIFIC DIMENSION
            # (skipped while the unit is still protected after a recent change)
            if units.protect == 0:
                if parameters.criterion == 4:
                    parameters, units = UnitSpecificDimensionLineFitting.UnitSpecificDimensionLineFitting(
                        parameters, units)
            else:
                units.protect = units.protect - 1
            # 5: Benchmark — record the current output dimension.
            outDim[loop] = units.outdimension
        # Bug fix: previously the per-repetition results were discarded; store
        # them in the preallocated matrix (parameters.T == parameters.rows).
        outDimTotal[g, :] = outDim.ravel()
    print("ende")
    return


main()
| {"/main.py": ["/Init.py", "/stimulus_activation.py", "/unit_adaption.py", "/UnitSpecificDimensionLineFitting.py"], "/test.py": ["/Init.py"], "/Init.py": ["/Parameter.py", "/units.py"], "/unit_adaption.py": ["/eforrlsa.py"]} |
54,493 | Delsoc/PCA2 | refs/heads/master | /stimulus_activation.py | from copy import deepcopy
import math
import random
import numpy as np
def stimulus_activation(parameter, units):
    """Draw a random stimulus row from the data set and compute the unit's
    input deviation and activation.

    Sets parameter.xvalue/parameter.x to the chosen row, units.x_c to the
    deviation from the unit center and units.y to the projected activation.
    Returns the updated (parameter, units) pair.
    """
    # Random row index in [1, rows-1]: ceil of a uniform draw scaled by rows-1
    # (the -1, unlike the MATLAB original, keeps the index in bounds).
    parameter.xvalue = math.ceil((parameter.rows - 1) * random.uniform(0.1, 1.0))
    # Bug fix: `np.float` was deprecated and removed in NumPy 1.24; the builtin
    # `float` is the documented replacement and behaves identically here.
    shape_f = parameter.shape.astype(float)
    parameter.x = shape_f[parameter.xvalue, :].T
    # Column-vector copy of the selected stimulus row (replaces the original
    # element-by-element fill loop).
    X = shape_f[parameter.xvalue, :].reshape(-1, 1).copy()
    # Calculate neuron input (deviation from center) and output (activation).
    units.x_c = np.subtract(X, units.center)
    units.y = np.dot(units.weight.T, units.x_c)
    return (parameter, units)
54,494 | Delsoc/PCA2 | refs/heads/master | /UnitSpecificDimensionLineFitting.py | from copy import deepcopy
import math
import numpy as np
from scipy.linalg import orth
#from tables import parameters
def Find(liste, units, p):
    """Return the index of the first entry of *liste* exceeding
    units.totalVariance * p.dimThreshold, or 0 if none does.

    Note: an index of 0 is therefore ambiguous (first entry matches vs. no
    entry matches) — callers handle that case separately.
    """
    threshold = units.totalVariance * p.dimThreshold
    for idx, value in enumerate(liste):
        if value > threshold:
            return idx
    return 0
def UnitSpecificDimensionLineFitting(parameter, units):
    """Adjust the unit's output dimensionality (criterion 4, line-fitting).

    If the retained variance drops below totalVariance * dimThreshold, the
    eigenvalue spectrum is extrapolated by a straight line fitted in log
    scale and enough dimensions are appended to clear the threshold again.
    Otherwise, if dropping the smallest eigenvalue would still satisfy the
    threshold, one dimension is removed. Returns (parameter, units) with the
    unit modified in place.
    """
    if (units.variance < units.totalVariance * parameter.dimThreshold):
        # --- Add n dimensions ---
        # Transform eigenvalues into log scale and fit a line through them.
        logEigenvalues = np.log(units.eigenvalue)
        x = np.zeros((units.outdimension, 1))
        for i in range(0, units.outdimension):
            x[i] = i + 1
        x = x.flatten()
        logEigenvalues1D = logEigenvalues.flatten()
        pp = np.polyfit(x, logEigenvalues1D, 1)
        p = np.poly1d(pp)
        x1 = np.arange(max(x) + 1, parameter.columns + 1).T
        # Bug fix: the original computed p(1) * x1 + p(2), i.e. it EVALUATED
        # the fitted polynomial at the points 1 and 2 — a mistranslation of
        # MATLAB's coefficient indexing p(1), p(2). The intended extrapolation
        # is slope * x1 + intercept, i.e. the polynomial evaluated at x1.
        approximatedEigenvaluesLog = p(x1)
        # Transform back into normal scale.
        approximatedEigenvalues = np.zeros((len(approximatedEigenvaluesLog), 1))
        for i in range(len(approximatedEigenvaluesLog)):
            approximatedEigenvalues[i] = math.exp(approximatedEigenvaluesLog[i])
        # First extrapolated cumulative variance above the threshold
        # (Find returns 0 when no entry qualifies — handled just below).
        addedDim = Find(np.cumsum(abs(approximatedEigenvalues)) + units.variance, units, parameter)
        if units.variance + sum(abs(approximatedEigenvalues)) < units.totalVariance * parameter.dimThreshold:
            # Even all extrapolated eigenvalues cannot reach the threshold;
            # fall back to adding a single dimension.
            addedDim = 1
        if addedDim + units.outdimension > parameter.columns:
            addedDim = parameter.columns - units.outdimension  # never exceed input dimension
        # Append addedDim random (orthonormalized) directions to the weights.
        # NOTE(review): the addedDim == 0 branch still appends one random,
        # non-orthonormalized column — inherited from the port; verify intent.
        if addedDim == 0:
            randOrth = np.random.rand(parameter.columns, 1)
        else:
            randOrth = orth(np.random.rand(parameter.columns, addedDim))
        units.weight = np.hstack((units.weight, randOrth))
        # Append the corresponding extrapolated eigenvalues.
        # NOTE(review): the else branch appends 2*addedDim + 1 values
        # (addedDim zeros plus addedDim+1 extrapolations) — kept as-is from
        # the port; verify against the MATLAB original.
        if addedDim == 0:
            appendApproximatedEigenvalues = approximatedEigenvalues[addedDim]
        else:
            appendApproximatedEigenvalues = np.zeros((addedDim, 1))
            for i in range(0, addedDim + 1):
                appendApproximatedEigenvalues = np.append(appendApproximatedEigenvalues, approximatedEigenvalues[i])
        units.eigenvalue = np.append(units.eigenvalue, appendApproximatedEigenvalues)
        size = len(units.eigenvalue)
        units.eigenvalue = np.reshape(units.eigenvalue, (size, 1))
        units.outdimension = units.outdimension + addedDim
        units.realDim = units.outdimension
        units.y = np.zeros((units.outdimension, 1))
        ZeroElement = np.array([0])
        # NOTE(review): pads mt by addedDim + 2 entries — off-by-one inherited
        # from the port; TODO verify the intended length.
        for i in range(addedDim + 2):
            units.mt = np.append(units.mt, ZeroElement)
        units.gy = np.zeros((units.outdimension, 1))
        units.protect = 10  # shield the unit for a few iterations after the change
        print('%s %d' % ("Dimension: ", units.outdimension))
    else:
        if sum(units.eigenvalue[:len(units.eigenvalue) - 1]) > (units.totalVariance * parameter.dimThreshold):
            # --- Remove one dimension ---
            if units.outdimension > 2:
                units.outdimension = units.outdimension - 1
                # Bug fix: the original deleted column index len(units.weight)
                # (= the ROW count), which is out of range for axis 1; drop the
                # LAST column instead.
                units.weight = np.delete(units.weight, -1, 1)
                units.eigenvalue = units.eigenvalue[:-1]
                units.y = units.y[:-1]
                units.mt = units.mt[:-1]
                # Bug fix: the original truncated the non-existent attribute
                # `units.gt` (AttributeError); the units class defines `gy`.
                units.gy = units.gy[:-1]
                units.protect = 10
                print('%s %d' % ("Dimension: ", units.outdimension))
        # else: do nothing — the current dimensionality is above the threshold,
        # but removing the last dimension would drop below it again.
    if units.realDim == 1:
        units.suggestedOutdimension = 1
    else:
        units.suggestedOutdimension = units.outdimension
    return (parameter, units)
| {"/main.py": ["/Init.py", "/stimulus_activation.py", "/unit_adaption.py", "/UnitSpecificDimensionLineFitting.py"], "/test.py": ["/Init.py"], "/Init.py": ["/Parameter.py", "/units.py"], "/unit_adaption.py": ["/eforrlsa.py"]} |
54,495 | Delsoc/PCA2 | refs/heads/master | /units.py | import random
from copy import deepcopy
import numpy as np
import numpy.matlib
# rand=round(random.uniform(0.1, 1.0),4)
class units():
    """State of a single adaptive-PCA unit: center, principal axes (weights),
    eigenvalue estimates and the bookkeeping used by the adaptive
    learning-rate control."""

    def __init__(self, pm):
        # Protects a unit for n iterations after its dimensionality changed
        self.protect = 100
        # Unit-specific output dimension (plus the suggested/real variants
        # maintained by UnitSpecificDimensionLineFitting)
        self.outdimension = pm.StartDim
        self.suggestedOutdimension = pm.StartDim
        self.realDim = pm.StartDim
        # Init center from the FIRST data row, each element scaled by an
        # independent random factor in [0.1, 1.0].
        # NOTE(review): the MATLAB original chose N data points at random and
        # built the same center vector for each; verify row 0 is intended here.
        tempDataVec = deepcopy(pm.shape[0])
        for i in range(0, len(tempDataVec)):
            tempDataVec[i] = tempDataVec[i] * round(random.uniform(0.1, 1.0), 4)
        centerHelp = np.array([tempDataVec])
        self.center = centerHelp.T
        ''' first m principal axes (weights)
        orthonormal (as needed by distance measure) '''
        self.weight, s, vh = np.linalg.svd(np.random.rand(pm.columns, self.outdimension), full_matrices=False)
        # first m eigenvalues (all initialised to lambda_init)
        self.eigenvalue = np.matlib.repmat(pm.lambda_init, self.outdimension, 1)
        # residual variance in the minor (d - m) eigendirections
        self.sigma = pm.lambda_init
        # deviation between input and center
        self.x_c = np.zeros((pm.columns, 1))
        # unit output (activation) for input x_c
        self.y = np.zeros((self.outdimension, 1))
        # unit matching measure
        self.mt = np.zeros((self.outdimension, 1))
        # unit summarized matching measure
        self.Dt = 1.0
        # Unit variance (sum of retained eigenvalues)
        self.variance = 0
        # Unit total variance (retained + residual)
        self.totalVariance = 0
        # global learning rate
        self.alpha = pm.epsilon_init
        # used in UnitSpecificDimensionLineFitting
        self.gy = np.zeros((self.outdimension, 1))
| {"/main.py": ["/Init.py", "/stimulus_activation.py", "/unit_adaption.py", "/UnitSpecificDimensionLineFitting.py"], "/test.py": ["/Init.py"], "/Init.py": ["/Parameter.py", "/units.py"], "/unit_adaption.py": ["/eforrlsa.py"]} |
54,496 | Delsoc/PCA2 | refs/heads/master | /test.py | import Init
import random
import numpy as np
import numpy.matlib
#import eforrlsa
from copy import deepcopy
#c = np.matlib.repmat(5, 3,1)
#print(np.dot(c.T,c))
'''pm = Parameter.Parameter()
print(pm.criterion)
print(pm.shape[0])
init.initialize()
print(pm.rows)
print(pm.columns)
print(pm.log_precomp)
parameters,units = Init.initialize()
print(parameters.shape[0])
b=parameters.shape[0]*2
print(b)
#a = [[0.020751949359402,0.953393346194937],[0.633648234926275,0.003948266327914],[0.748803882538612,0.512192263385777],[0.498507012302590,0.812620961652114],[0.224796645530848,0.612526066829388],[0.198062864759624,0.721755317431800],[0.760530712198959,0.291876068170633],[0.169110836562535,0.917774122512943],[0.088339814174010,0.714575783397691],[0.685359818367797,0.542544368011261]]
u, s, vh = np.linalg.svd(np.random.rand(10,2), full_matrices=False)
#h, tau = np.linalg.qr(a)
out_mat = np.matlib.repmat(7, 2, 2)
print(out_mat)
parameters,units = Init.initialize()
EditUnits = eforrlsa.eforrlsa(units)
print(EditUnits.Dt)
for i in range(0,2):
print(i)
a=2
if a==2:
print("right")
else:
print("wrong")
if a==22:
print("c")
EFO_p = np.zeros((2, 1))
print(EFO_p)
print(EFO_p[1]*EFO_p[1])
parameters,units = Init.initialize()
a=units.y[0]
print(a)
parameters,units = Init.initialize()
print(units.weight)
print(units.weight[0][0])
#for i in range(0,len(units.weight)):
# units.weight[i][1]=2
print(units.weight)
print("neuer Test:")
units.weight[...,1]=2
print(units.weight)
c=units.weight[...,1]
print(c)
for i in range(0,6):
print("i: ",i)
for j in range(0,i+1):
print(j)
a = [1, 1, 4.5, 3, 2, 2+1j]
b = np.isreal(a)
real=0
for k in range(0,len(b)):
if b[k]==False:
real=1
print(real)
a=-1
print(abs(a))
parameters,units = Init.initialize()
units.eigenvalue[0] = 400
print(units.eigenvalue)
units.eigenvalue[:] = units.eigenvalue[::-1]
print(units.eigenvalue)
print(sum(units.eigenvalue))
editUnits = deepcopy(units)
parameters,units = Init.initialize()
print(units.mt)
units.mt[0] = 43
units.mt[1] = -26
units.mt = units.mt/2
units.mt = abs(units.mt)
minimum = min(units.mt)
print(units.mt)
print(minimum)
# alphabets list
import test2
alphabets = [1,2,3,4,5,6,7,8,8]
test2.test1(alphabets)
print(alphabets)
testarray = np.zeros((10, 1))
testarray += 5
transponiert = testarray.T
Produkt = transponiert * testarray
Produkt2 = testarray * transponiert
for i in range(1000):
a=random.uniform(0.1, 1.0)
print(a)
a = np.random.rand(10, 1)
print(a)
print("b:")
b= np.hstack((a,a))
print(b)
print('%s %d' % ("namen: ",1))
a = np.random.rand(3, 1)
print(a)
print(sum(a))
print("b")
b= a[:len(a)-1]
print(b)
a = np.arange(12).reshape(3, 4)
print(a)
b = np.delete(a,len(a),1)
print(b)
a = np.zeros((3, 1))
print(a)
b= a.T
print(b)
print(4)'''
#ERG = array.flatten()
# Quick sanity check: fit a first-degree polynomial (straight line)
# through two sample points and print its [slope, intercept].
x = np.array([1, 2])
y = np.array([0.7715, -0.0530])
z = np.polyfit(x, y, deg=1)
print(z)
| {"/main.py": ["/Init.py", "/stimulus_activation.py", "/unit_adaption.py", "/UnitSpecificDimensionLineFitting.py"], "/test.py": ["/Init.py"], "/Init.py": ["/Parameter.py", "/units.py"], "/unit_adaption.py": ["/eforrlsa.py"]} |
54,497 | Delsoc/PCA2 | refs/heads/master | /Parameter.py | import scipy.io as sio
import math
'''Datasets:
0. cameraman
1. circles
2. PHM
3. careercon
4. waveData
5. waveDataNoNoise
'''
Datasets = ["cameraman.mat", "circles.mat", "PHM.mat", "careercon.mat", "waveData.mat", "waveDataNoNoise.mat"]
'''criterion:
0. Eigenvalue-Ooe
1. Eigenvalue-average
2. Percentage of total variance
3. Cumulative percentage of total variance
4. Kirsch et al. approach
'''
class Parameter():
    """Global configuration: data set selection, learning-rate schedule,
    dimensionality-control thresholds and precomputed constants."""

    def __init__(self):
        # Placeholder; overwritten with the current stimulus row in
        # stimulus_activation.
        self.x = 0
        self.Dataset = 3    # 0-5, index into Datasets (see list above)
        self.criterion = 4  # dimension criterion (see list above); only 4 is used so far
        self.temp = sio.loadmat(Datasets[self.Dataset])
        # Data matrix: rows = samples, columns = input dimension
        self.shape = self.temp['data']
        self.StartDim = 2   # initial unit output dimension
        self.tau = 0
        self.sigmaMean = 0
        # Fraction of total variance the unit must retain
        self.dimThreshold = 0.90
        # learning rate schedule
        self.epsilon_init = 0.5
        self.epsilon_final = 0.001
        # Neighborhood range
        self.rho_init = 2
        self.rho_final = 0.01
        # initial variance
        self.lambda_init = 1000.0
        # number of data points and input dimension
        self.columns = len(self.shape[0])
        self.rows = len(self.shape)
        # number of total iterations (one pass over the data)
        self.T = self.rows
        self.helper = 0
        # Low-pass constant for adaptive learning-rate control of
        # Neural Gas Principal Component Analysis.
        self.mu = 0.005
        self.xvalue =0
        # Smallest admissible argument to log() (avoids -inf)
        self.logArgMinLimit = 1e-323
        self.phi = 2.0
        self.udmLogBase = 10.0
        # Precomputed log(udmLogBase), rounded to 4 decimals (~2.3026)
        self.log_precomp = math.log(self.udmLogBase).__round__(4)
54,498 | Delsoc/PCA2 | refs/heads/master | /eforrlsa.py | #EFORRLSA (Moeller, 2002)
#Interlocking of learning and orthonormalization in RRLSA
import numpy as np
import math
from copy import deepcopy
def eforrlsa(units):
    """EFORRLSA update (Moeller, 2002): one interlocked learning +
    orthonormalization step of RRLSA on the unit's weight matrix.

    Reads units.{weight, outdimension, alpha, eigenvalue, y, x_c}, updates
    units.weight and units.eigenvalue in place, and returns the same units
    object. The recursions below are order-dependent: each i-iteration uses
    the accumulators (EFO_t, EFO_d, EFO_p, EFO_q, EFO_r) from earlier ones.
    """
    # Local working copies / accumulators
    V = deepcopy(units.weight)                  # snapshot of the old weight basis
    EFO_L2 = np.zeros((units.outdimension, 1))  # new squared eigenvalue estimates
    EFO_p = np.zeros((units.outdimension, 1))
    EFO_q = np.zeros((units.outdimension, units.outdimension))
    EFO_r = np.zeros((units.outdimension, units.outdimension))
    # Algorithm equations 3-12
    for i in range(0, units.outdimension):  # note: outdimension = 2 -> i in {0, 1}
        '''NOTE(review): this string block was corrupted in the original
        source (comment text interleaved with stray copies of code lines);
        it is never used at runtime. Original intent of the note:
        helperVariable1 is alpha * eigenvalue, where THIS alpha tends to 1
        while the main algorithm's alpha tends to 0 — hence the 1 - alpha.
        '''
        helperVariable1 = (1 - units.alpha) * units.eigenvalue[i]
        # beta * output — same schedule as alpha in the main algorithm
        helperVariable2 = units.alpha * units.y[i]
        # Init and update of t and d, equations 5 + 6
        if i == 0:
            EFO_t = 0
            EFO_d = np.dot(units.x_c.T, units.x_c)
        else:
            EFO_t = EFO_t + EFO_p[i - 1] * EFO_p[i - 1]
            EFO_d = EFO_d - units.y[i - 1] * units.y[i - 1]
        if EFO_d < 0:
            EFO_d = 0
        # Equation 7
        EFO_s = (helperVariable1 + units.alpha * EFO_d) * units.y[i]
        # Equation 8
        EFO_L2[i] = helperVariable1 * helperVariable1 + helperVariable2 * (helperVariable1 * units.y[i] + EFO_s)
        # Equation 9
        EFO_n2 = EFO_L2[i] - EFO_s * EFO_s * EFO_t
        # ensure that EFO_n2 > 0
        '''Review note (translated from the original German): the original
        asked whether 1e-100 is already effectively zero in MATLAB; this
        simply floors EFO_n2 before the sqrt to avoid division problems.
        '''
        if EFO_n2 < 1e-100:
            EFO_n2 = 1e-100
        EFO_n = math.sqrt(EFO_n2)
        # Equation 12
        EFO_p[i] = (helperVariable2 - EFO_s * EFO_t) / EFO_n
        # First additive term of equation 4 (see index labels): fill every row
        # of column i with the same product.
        for j in range(len(units.weight[:, 0])):
            units.weight[j, i] = EFO_p[i] * units.x_c[j]
        # i+1 here (unlike the MATLAB code) because Python's range() excludes
        # the end value while MATLAB loops are inclusive.
        for i2 in range(0, i + 1):
            # Equations 10 + 11
            if i2 < i:
                EFO_r[i, i2] = EFO_r[i - 1, i2] + EFO_p[i - 1] * EFO_q[i - 1, i2]
                EFO_q[i, i2] = -(helperVariable2 * units.y[i2] + EFO_s * EFO_r[i, i2]) / EFO_n
            else:
                EFO_r[i, i2] = 0
                EFO_q[i, i2] = helperVariable1 / EFO_n
            # Equation 4: accumulate the contribution of the old basis vectors.
            units.weight[..., i] = units.weight[..., i] + (EFO_q[i, i2] * V[..., i2])
    units.eigenvalue = np.sqrt(EFO_L2)
    # NOTE(review): np.sqrt of a negative real yields nan (not a complex
    # number), so this is-real check is effectively vestigial from the
    # MATLAB port — verify whether abs() is ever actually needed here.
    b = np.isreal(units.eigenvalue)
    real = 0  # set to 1 if any entry is flagged non-real
    for k in range(0, len(b)):
        if b[k] == False:
            real = 1
    if real == 1:
        units.eigenvalue = abs(units.eigenvalue)
    return units
54,499 | Delsoc/PCA2 | refs/heads/master | /test2.py | def test1(liste):
liste[0] = 3
return 0 | {"/main.py": ["/Init.py", "/stimulus_activation.py", "/unit_adaption.py", "/UnitSpecificDimensionLineFitting.py"], "/test.py": ["/Init.py"], "/Init.py": ["/Parameter.py", "/units.py"], "/unit_adaption.py": ["/eforrlsa.py"]} |
54,500 | Delsoc/PCA2 | refs/heads/master | /Init.py | #diese funktion ist nur da, um es "nah am MatLabCode" zu halten
import Parameter
import units
def initialize():
    """Build and return a fresh (Parameter, units) pair.

    Kept as a separate function only to stay close to the MATLAB code's
    initialisation step.
    """
    params = Parameter.Parameter()    # parameters are initialised in Parameter.py
    unit_state = units.units(params)  # units are initialised from those parameters
    return (params, unit_state)
54,501 | Delsoc/PCA2 | refs/heads/master | /unit_adaption.py | from copy import deepcopy
from setuptools.command.easy_install import easy_install
import eforrlsa
import numpy as np
def unit_adaption(parameter, units):
    """Adapt the unit to the current stimulus: learning rate, center,
    PCA weights, eigenvalues/variances and the matching state Dt.

    Returns the (parameter, units) pair with the unit updated in place.
    """
    # Adaptive learning rate: interpolates between epsilon_init and
    # epsilon_final, driven by the matching measure Dt (exponent phi).
    units.alpha = (parameter.epsilon_init - parameter.epsilon_final) * units.Dt ** parameter.phi + parameter.epsilon_final
    # Update the center of a unit, Eq. (3.8) Schenck dissertation (Neural Gas Step)
    units.center = units.center + units.alpha * units.x_c
    # RRLSA step: updates units.weight and units.eigenvalue in place.
    units= eforrlsa.eforrlsa(units)
    # sortEigenvalues
    # NOTE(review): this only REVERSES the eigenvalue vector; it yields a
    # descending sort only if eforrlsa returned ascending values — the
    # original comment claims "tested, sorts descending"; verify.
    units.eigenvalue[:] = units.eigenvalue[::-1]
    units.variance = sum(units.eigenvalue)
    if (parameter.columns != units.outdimension):
        # Track residual variance sigma in the minor eigendirections.
        units.sigma = units.sigma + units.alpha * (
            np.dot(units.x_c.T, units.x_c) - np.dot(units.y.T, units.y) - units.sigma)
        if (units.sigma < 0):
            # Clamp to the smallest admissible log() argument.
            units.sigma = parameter.logArgMinLimit
        units.totalVariance = units.variance + units.sigma
    else:
        # Full-dimensional unit: no residual variance outside the subspace.
        units.totalVariance = units.variance
    # update internal unit state for adaptive learning rate control
    units.Dt = 0.0
    a_lowpass = parameter.mu
    for i in range(0, units.outdimension):  # note: outdimension = 2 -> i in {0, 1}
        # Low-pass filtered per-dimension matching measure.
        units.mt[i] = units.mt[i] * (1 - a_lowpass) + units.y[i] * units.y[i] / units.eigenvalue[i] * a_lowpass
        if (units.mt[i] > parameter.logArgMinLimit):
            amin = np.amin(abs(np.log(units.mt[i]) / parameter.log_precomp))
            # Same behaviour as MATLAB's min(A, 1.0): cap the contribution at 1.
            if amin > 1:
                amin = 1.0
            units.Dt = units.Dt + amin
        else:
            units.Dt = units.Dt + 1.0
    units.Dt = units.Dt / units.outdimension
    return (parameter, units)
| {"/main.py": ["/Init.py", "/stimulus_activation.py", "/unit_adaption.py", "/UnitSpecificDimensionLineFitting.py"], "/test.py": ["/Init.py"], "/Init.py": ["/Parameter.py", "/units.py"], "/unit_adaption.py": ["/eforrlsa.py"]} |
54,502 | RealGeeks/django-cache-purge-hooks | refs/heads/master | /cache_purge_hooks/backends/varnishbackend.py | import logging
import subprocess
from django.conf import settings
logger = logging.getLogger('django.cache_purge_hooks')
VARNISHADM_HOST = getattr(settings, 'VARNISHADM_HOST', 'localhost')
VARNISHADM_PORT = getattr(settings, 'VARNISHADM_PORT', 6082)
VARNISHADM_SECRET = getattr(settings, 'VARNISHADM_SECRET', '/etc/varnish/secret')
VARNISHADM_SITE_DOMAIN = getattr(settings, 'VARNISHADM_SITE_DOMAIN', '.*')
VARNISHADM_BIN = getattr(settings, 'VARNISHADM_ADM_BIN', '/usr/bin/varnishadm')
class VarnishManager(object):
    """Issue cache invalidations through the ``varnishadm`` command-line tool."""

    def purge(self, url):
        """Ban (invalidate) every cached object whose request URL matches the
        regex *url* on hosts matching VARNISHADM_SITE_DOMAIN.

        Returns True on success, False if varnishadm exited non-zero.
        """
        # Bug fix: the host/url were previously passed through .encode('ascii').
        # Under Python 3, formatting bytes into a str interpolates their repr
        # (b'...'), producing a ban expression like `req.url ~ "b'/foo'"` that
        # silently matches nothing. Format the strings directly.
        command = 'ban req.http.host ~ "{host}" && req.url ~ "{url}"'.format(
            host=VARNISHADM_SITE_DOMAIN,
            url=url,
        )
        return self.send_command(command)

    def purge_all(self):
        """Invalidate the entire cache for the configured domain."""
        return self.purge('.*')

    def send_command(self, command):
        """Run *command* via the varnishadm binary.

        Returns True on success; logs and returns False when varnishadm
        exits with a non-zero status.
        """
        args = [VARNISHADM_BIN, '-S', VARNISHADM_SECRET, '-T', VARNISHADM_HOST + ':' + str(VARNISHADM_PORT), command]
        try:
            subprocess.check_call(args)
        except subprocess.CalledProcessError as error:
            logger.error('Command "{0}" returned {1}'.format(' '.join(args), error.returncode))
            return False
        else:
            logger.debug('Command "{0}" executed successfully'.format(' '.join(args)))
            return True
| {"/sampleproject/sample/models.py": ["/cache_purge_hooks/__init__.py"], "/tests/test_nginx_backend.py": ["/cache_purge_hooks/backends/nginxbackend.py"], "/tests/test_import_backend.py": ["/cache_purge_hooks/manager.py"], "/tests/test_hooks_get_called.py": ["/sampleproject/sample/models.py"], "/cache_purge_hooks/shortcuts.py": ["/cache_purge_hooks/manager.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.