index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
67,285 | RyanKung/qubit | refs/heads/master | /qubit/signals/signals.py | from blinker import signal
# Signal fired to invalidate the cached "flying" qubit lookups (connected
# in qubit/types/qubit.py).
# NOTE(review): the registry name misspells "flying" as "fyling"; harmless
# while every consumer imports this object, but confirm no external code
# looks the signal up by name before renaming it.
clear_flying_qubit_cache = signal('clear-fyling-qubit-cache')
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,286 | RyanKung/qubit | refs/heads/master | /qubit/middleware/__init__.py | from .eventsocket import QubitSocket
# Public API of the middleware package: the ordered middleware list
# consumed by wsgiapp.
__all__ = ['middleware']
# Middlewares applied to the app, in order; currently only the event socket.
middleware = [QubitSocket]
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,287 | RyanKung/qubit | refs/heads/master | /schema/utils.py | from qubit.io.postgres import connection
from postgresql.driver.pq3 import Connection
__all__ = ['execute_file']
def execute_file(filename: str, conn: 'Connection' = None) -> 'Connection':
    '''
    Execute every statement of a SQL file on *conn* and commit.

    Fix: the ``conn`` argument used to be ignored (it was immediately
    rebound to ``connection()``); it is now honoured, and
    ``connection()`` is only a fallback when no connection is given.

    Note: lines are stripped and concatenated, so statements must not
    depend on line breaks (a ``--`` line comment would swallow the rest
    of the joined text).
    '''
    if conn is None:
        conn = connection()
    with open(filename, 'r') as f:
        # Collapse the file into one string, then split on ';' into
        # individual statements; empty fragments are skipped.
        sql = ''.join(line.strip() for line in f)
        cur = conn.cursor()
        for query in sql.split(';'):
            if query:
                cur.execute(query)
                print(query)  # progress/debug echo, as before
        conn.commit()
    return conn
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,288 | RyanKung/qubit | refs/heads/master | /qubit/apis/qubit.py | from qubit.types import Qubit
from qubit.types.utils import ts_data
from qubit.core import app
from flask import request
from .utils import resp_wrapper as wrapper
from .utils import jsonize
__all__ = ['qubit_api', 'entangle', 'stem_api', 'monad_tester']
@app.route('/qubit/<qid>/', methods=['GET', 'PUT', 'DELETE', 'PATCH'])
@app.route('/qubit/', methods=['POST'])
@jsonize
@wrapper
def qubit_api(qid=None):
    """CRUD endpoint for a single qubit.

    POST   /qubit/        -> create a qubit from the JSON body
    GET    /qubit/<qid>/  -> fetch the qubit as a dict (or None)
    PUT    /qubit/<qid>/  -> push a measurement (ts_data JSON body)
    PATCH  /qubit/<qid>/  -> partial update from the JSON body
    DELETE /qubit/<qid>/  -> delete the qubit row

    Fix: the dispatch table always had a 'PUT' handler, but PUT was
    missing from the route's methods, so ``push`` was unreachable
    (Flask answered 405 before dispatch).
    """
    def create():
        return dict(id=Qubit.create(**request.json))

    def push():
        qubit = Qubit.get(qid)
        data = ts_data(**request.json)
        Qubit.measure(qubit, data)

    def fetch():
        # Returns None for a missing qubit rather than raising.
        res = Qubit.get(qid)
        return res and res._asdict()

    def update():
        return Qubit.update(qid, request.json)

    def delete():
        return Qubit.manager.delete(qid)

    return {
        'GET': fetch,
        'PUT': push,
        'PATCH': update,
        'DELETE': delete,
        'POST': create
    }.get(request.method)()
@app.route('/qubit/monad/test/', methods=['POST'])
@jsonize
@wrapper
def monad_tester():
    """Dry-run a monad (user-supplied Python source) and return its locals.

    SECURITY NOTE(review): this exec()s request-supplied code via
    Qubit.exec_monad; acceptable only if every API client is trusted.
    """
    loc = {}
    try:
        Qubit.exec_monad(request.json['monad'], loc=loc)
        # On success, expose the locals the monad produced.
        return dict(data=loc)
    except Exception as e:
        # Best-effort error report; jsonize serializes the exception
        # object via default=str.
        return {
            'data': dict(ex=e)
        }
@app.route('/qubit/stem/', methods=['GET', 'POST'])
@jsonize
@wrapper
def stem_api(name=None):
    """List all flying stem qubits as dicts.

    NOTE(review): the route also declares POST, but the dispatch table
    below only handles GET — a POST makes ``.get`` return None and the
    trailing call raises TypeError. Either drop POST from the route or
    add a handler.  The ``name`` parameter is accepted but never used.
    """
    def get():
        # get_stem() may be falsy; only map to dicts when non-empty.
        stems = Qubit.get_stem()
        return stems and list(map(lambda x: x._asdict(), stems))
    return {
        'GET': get
    }.get(request.method)()
@app.route('/qubit/<qid>/last/', methods=['GET'])
@jsonize
@wrapper
def last(qid):
    """Return the most recent cached state of qubit *qid*."""
    return Qubit.get_current(qid)
@app.route('/qubit/entangle/<entangle>/', methods=['GET'])
@app.route('/qubit/entangle/<qid>/', methods=['POST'])
@jsonize
@wrapper
def entangle(qid=None, entangle=None):
    """Read or create an entanglement link.

    GET  -> list the qubits currently flying under *entangle*.
    POST -> entangle qubit *qid* with the qubit id in the JSON body.
    """
    def set_entangle():
        payload = request.json
        return Qubit.entangle(qid, payload['id'])

    def get_entangle():
        # Fix (idiom): get_flying already yields plain dicts; the old
        # identity map(lambda x: x, ...) added nothing.
        return list(Qubit.get_flying(entangle))

    return {
        'GET': get_entangle,
        'POST': set_entangle
    }.get(request.method)()
@app.route('/qubit/entangle/<entangle>/tree/', methods=['GET'])
@jsonize
@wrapper
def entangle_tree(entangle):
    """Return the tree of qubits entangled under *entangle*.

    NOTE(review): ``res`` is always a non-empty list (it wraps the
    inner list, even when that inner list is empty), so ``res or ...``
    always short-circuits and the recursive branch is unreachable —
    only one level is ever returned. The recursion would also map over
    result lists rather than entangle names. Intended behavior needs
    clarifying before a fix.
    """
    def get_entangle_tree(entangle):
        res = [list(map(lambda x: x, Qubit.get_flying(entangle)))]
        return res or res + list(map(get_entangle_tree, res))
    return get_entangle_tree(entangle)
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,289 | RyanKung/qubit | refs/heads/master | /qubit/types/utils.py | from collections import namedtuple
import datetime
from dateutil.relativedelta import relativedelta
from itertools import starmap
from operator import mul
__all__ = ['ts_data', 'empty_ts_data', 'DateRange']
# Timestamped payload record shared across the types layer.
ts_data = namedtuple('data', ['datum', 'ts'])


def empty_ts_data():
    """Return a ts_data carrying no datum, stamped with the current time."""
    timestamp = str(datetime.datetime.now())
    return ts_data({}, timestamp)
class DateRange():
    """Iterator of consecutive (start, end) datetime windows.

    Starting ``cycle`` × ``period`` before "now" (truncated down to the
    period boundary), each __next__ yields a (start, end) pair spanning
    ``gap`` × ``period``; the final window is clamped to now.
    """
    def __init__(self, period, cycle, gap=1):
        # period: 'years' | 'months' | 'days' | 'hours' | 'minutes' |
        # 'seconds'; cycle: how many periods back to start; gap: window
        # width in periods.
        self.now = datetime.datetime.now()
        self.period = period
        self.gap = gap
        # Two 6-tuple masks over (Y, M, D, h, m, s), used to truncate
        # the start time to a period boundary:
        #   submask[0]: exponent mask — a 0 turns a component into
        #     x**0 == 1 (the datetime() minimum for month/day);
        #   submask[1]: multiplier mask — a 0 zeroes a component
        #     (the minimum for hour/minute/second).
        submask = dict(
            years=((1, 0, 0, 0, 0, 0), (1, 1, 1, 0, 0, 0)),
            months=((1, 1, 0, 0, 0, 0), (1, 1, 1, 0, 0, 0)),
            days=((1, 1, 1, 0, 0, 0), (1, 1, 1, 0, 0, 0)),
            hours=((1, 1, 1, 1, 0, 0), (1, 1, 1, 1, 0, 0)),
            minutes=((1, 1, 1, 1, 1, 0), (1, 1, 1, 1, 1, 0)),
            seconds=((1, 1, 1, 1, 1, 1), (1, 1, 1, 1, 1, 1))
        ).get(self.period)
        # Go back `cycle` periods and take the first six timetuple
        # fields (Y, M, D, h, m, s)...
        start_tuple = tuple(
            (self.now - relativedelta(**{period: cycle})).timetuple())[: 6]
        # ...then apply the masks: e.g. datetime(y, 1, 1, 0, 0, 0) for
        # 'years', datetime(y, m, 1, 0, 0, 0) for 'months'.
        self.start = datetime.datetime(*tuple(starmap(mul, zip(
            starmap(pow, zip(start_tuple, submask[0])), submask[1]))))
    def __call__(self):
        # Calling an instance returns itself (handy where a factory is
        # expected).
        return self
    def __iter__(self):
        return self
    def __str__(self):
        # NOTE(review): "DataRange" misspells the class name DateRange.
        return "<DataRange from: %s to: %s with: %s/%s>" % (
            self.start, self.now, self.period, self.gap)
    def __next__(self):
        """Yield the next (start, end) window; StopIteration once past now."""
        if self.start > self.now:
            raise StopIteration('Done')
        start = self.start
        end = start + relativedelta(**{self.period: self.gap})
        # Clamp the final partial window to `now`.
        if end > self.now:
            res = (start, self.now)
        else:
            res = (start, end)
        self.start = end
        return res
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,290 | RyanKung/qubit | refs/heads/master | /qubit/types/qubit.py | #! -*- eval: (venv-workon "qubit"); -*-
import json
import runpy
from types import ModuleType
from functools import partial
from qubit.io.pulsar import async
from qubit.io.postgres import types
from qubit.io.postgres import QuerySet
from qubit.io.celery import Entanglement
from qubit.io.celery import queue
from qubit.io.celery import task_method
from qubit.io.celery import period_task
from qubit.io.redis import client, cache, clear
from qubit.types.utils import ts_data, empty_ts_data
from qubit.types.states import States
from qubit.signals.signals import clear_flying_qubit_cache
__all__ = ['Qubit']
tell_client = partial(client.publish, 'eventsocket')
@clear_flying_qubit_cache.connect
def clear_flying_cache(sender=None, **kwargs):
    """Signal receiver: invalidate the cached 'flying' qubit lookups.

    Fix: blinker invokes receivers as ``receiver(sender, **kwargs)``;
    the previous zero-argument signature raised TypeError whenever the
    signal was actually sent. The defaults keep direct zero-argument
    calls working.
    """
    clear('flying')
class QubitEntanglement(Entanglement):
    """Task base class for qubit activations.

    ``abstract = True`` — presumably keeps the task framework from
    registering this base itself (Celery convention) — TODO confirm.
    """
    abstract = True
    def on_success(self, res, task_id, args, kwargs):
        # Success callback (retval, task_id, args, kwargs);
        # intentionally a no-op hook.
        pass
class Qubit(object):
    """Persistent computation node ("qubit") with a user-supplied monad.

    A qubit row stores a Python source snippet (``monad``) that is
    executed against incoming ts_data; results can be persisted as
    States rows and fanned out to entangled downstream qubits.
    """
    # Table schema; `prototype` also acts as the row/record type.
    prototype = types.Table('qubit', [
        ('id', types.integer),
        ('name', types.varchar),
        ('entangle', types.varchar),
        ('is_stem', types.boolean),
        ('is_spout', types.boolean),
        ('monad', types.text),
        ('store', types.boolean),
        ('comment', types.text),
        ('flying', types.boolean),
        ('rate', types.integer)
    ])
    manager = QuerySet(prototype)

    @classmethod
    def create(cls, name, entangle=None,
               flying=True, is_stem=False, is_spout=False,
               store=False, *args, **kwargs):
        """Insert a new qubit row; return the new id (falsy on failure)."""
        qid = cls.manager.insert(
            name=name,
            entangle=entangle,
            is_stem=is_stem,
            is_spout=is_spout,
            store=store,
            flying=flying, *args, **kwargs)
        if qid and is_stem:
            # Notify eventsocket clients that a new stem exists.
            tell_client('new_stem')
        # Invalidate the cached 'flying' lookups.
        clear('flying')
        return qid

    @staticmethod
    def require(name, *args, **kwargs):
        """Load a Python file by path and expose it as a module object.

        Injected into monad builtins as ``require`` in place of raw
        imports.
        """
        if '.py' not in name:
            name = name + '.py'
        module_dict = runpy.run_path(name)
        module = ModuleType(name)
        list(map(lambda x: setattr(module, *x), module_dict.items()))
        return module

    @staticmethod
    def __import__(name, *args, **kwargs):
        """Whitelisting __import__ injected into monad builtins.

        NOTE(review): on a rejected name this *returns* the
        NotImplementedError class instead of raising it.
        """
        whitelist = ['functools', 'operator',
                     'psutil',
                     'pandas', 'itertools']
        if name not in whitelist:
            return NotImplementedError
        return __import__(name, *args, **kwargs)

    @classmethod
    def send_data(cls, qid: str, data: str):
        """Publish *data* (a JSON string) on the qubit's socket channel."""
        assert isinstance(data, str)
        sck_name = 'qubitsocket::%s' % str(qid)
        client.publish(sck_name, json.dumps({'qid': qid, 'data': data}))

    @classmethod
    def update(cls, qid: str, data: dict):
        """Partial update of qubit row *qid* from the *data* mapping."""
        return cls.manager.update(qid, **data)

    @classmethod
    def get(cls, qid):
        """Fetch qubit *qid* as a prototype record."""
        return cls.prototype(**cls.manager.get(qid))

    @staticmethod
    def exec(qubit, data):
        """Run the qubit's monad against *data*; return the 'datum' it left.

        The monad sees ``datum`` (the incoming payload) plus restricted
        builtins, and must leave its result in a local named ``datum``.
        """
        qubit, data = Qubit.format(qubit, data)
        builtins = dict(__builtins__,
                        require=Qubit.require,
                        __import__=Qubit.__import__)
        glo = {
            'datum': data.datum,
            '__builtins__': builtins
        }
        loc = {
            'qubit': qubit
        }
        Qubit.exec_monad(qubit.monad, glo, loc)
        datum = loc['datum']
        # Normalize non-dict results so storage/serialization is uniform.
        if not isinstance(datum, dict):
            datum = dict(raw=datum)
        return datum

    @staticmethod
    def exec_monad(monad: str, glo={}, loc={}):
        """exec() a monad source string with restricted builtins.

        SECURITY: the sandboxing is only as strong as the builtins
        swap; monads must be trusted-operator input.
        NOTE(review): mutable default args ``glo``/``loc`` are shared
        across calls when omitted.
        """
        builtins = dict(__builtins__,
                        require=Qubit.require,
                        __import__=Qubit.__import__)
        glo = glo or {
            '__builtins__': builtins
        }
        exec(monad, glo, loc)

    @staticmethod
    @queue.task(filter=task_method, base=QubitEntanglement)
    @async
    def activate(qubit, data={}):
        """Async task: run the monad, persist state, then fan out.

        NOTE(review): ``@async`` is no longer legal syntax on
        Python 3.7+ (``async`` became a keyword); this module targets
        an older interpreter or the decorator needs renaming.
        """
        qubit, data = Qubit.format(qubit, data)
        datum = Qubit.exec(qubit, data)
        data = ts_data(datum=datum, ts=data.ts)
        print(qubit._asdict())  # debug output
        # Persist a States row only when the qubit opted into storage.
        qubit.store and States.create(
            qubit=qubit.id,
            datum=json.dumps(datum),
            ts=data.ts,
            tags=[])
        Qubit.set_current(qubit.id, data)
        Qubit.trigger(qubit=qubit, data=data)

    @classmethod
    def activate_all(cls):
        """Activate every flying spout (entry point of the period task)."""
        return list(map(
            cls.entanglement_trigger, cls.get_spouts()))

    @classmethod
    @cache(ttl=5000, flag='spout')
    def get_flying(cls, entangle):
        """List flying qubits entangled with *entangle* as dicts (cached)."""
        qubits = cls.manager.filter(
            entangle=entangle,
            flying=True)
        if not qubits:
            return []
        print(qubits)  # debug output
        return list(map(lambda x: cls.prototype(**x)._asdict(), qubits))

    @classmethod
    def get_spouts(cls):
        """All flying spout qubits, preferring a redis-side cache.

        NOTE(review): a cache hit returns the raw cached value, not
        prototype records like the miss path — callers get different
        shapes depending on the cache; confirm intent.
        """
        cached = client.get('qubit::spout_cache')
        return cached or list(map(
            cls.format_qubit,
            cls.manager.filter(flying=True, is_spout=True)))

    @classmethod
    def get_stem(cls):
        """All flying stem qubits as prototype records."""
        return list(map(
            cls.format_qubit,
            cls.manager.filter(flying=True, is_stem=True)))

    @classmethod
    def delete(cls, qubit_id):
        """Delete a qubit and its stored states; invalidates flying cache."""
        clear('flying')
        return (cls.manager.delete(qubit_id) and
                States.manager.delete_by(qubit=qubit_id))

    @classmethod
    def get_spem(cls, qid):
        '''
        Warning: A recursion calling
        '''
        # Walk `entangle` links ("kind:qid") upward until a stem, or a
        # direct stem:<id> link, is found; returns that stem's qid.
        qubit = cls.format_qubit(cls.get(qid))
        if qubit.is_stem:
            return qid
        kind, qid = qubit.entangle.split(':')
        if not kind == 'stem':
            return cls.get_spem(qid)
        else:
            return qid

    @classmethod
    def set_current(cls, qid, ts_data):
        """Publish and cache the latest state for *qid*.

        Note: the ``ts_data`` parameter shadows the module-level
        ``ts_data`` namedtuple.
        """
        data = json.dumps(ts_data._asdict())
        cls.send_data(qid, data)
        key = 'qubit:%s:state' % qid
        client.set(key, data)
        return True

    @classmethod
    def get_current(cls, qid):
        """Latest cached state for *qid*, or an empty ts_data if none."""
        key = 'qubit:%s:state' % qid
        data = client.get(key)
        if not data:
            return empty_ts_data()
        return ts_data(**json.loads(data.decode()))

    @classmethod
    @cache(ttl=10000, flag='spout')
    def get_by(cls, name):
        """Look a qubit up by name (cached)."""
        return cls.manager.get_by(name=name)

    @staticmethod
    def format(qubit, data):
        """Coerce (qubit, data) into (prototype record, ts_data)."""
        return (Qubit.format_qubit(qubit),
                Qubit.format_data(data))

    @staticmethod
    def format_qubit(qubit):
        """Coerce a dict or positional list into a prototype record."""
        if isinstance(qubit, dict):
            print(qubit)  # debug output
            qubit = Qubit.prototype(**qubit)
        if isinstance(qubit, list):
            qubit = Qubit.prototype(
                **dict(zip(Qubit.prototype._fields, qubit)))
        return qubit

    @staticmethod
    def format_data(data):
        """Coerce a dict or positional list into ts_data; empty when falsy."""
        if not data:
            return empty_ts_data()
        if isinstance(data, dict):
            data = ts_data(**data)
        if isinstance(data, list):
            data = ts_data(
                **dict(zip(ts_data._fields, data)))
        return data

    @staticmethod
    def entanglement_trigger(qubit, data={}):
        """Queue an async activation of *qubit* with *data*."""
        Qubit.activate.task.delay(
            qubit=qubit, data=data)

    @staticmethod
    def measure(qubit, depth):
        """Record a measurement via States history.

        NOTE(review): the return value of get_history is discarded, and
        the API layer passes a ts_data as *depth* — confirm intent.
        """
        States.get_history(qubit.id, depth)

    @classmethod
    def trigger(cls, qubit, data):
        """Fan *data* out to every qubit entangled with this one."""
        qubit, data = cls.format(qubit, data)
        name = qubit.is_stem and 'Stem' or 'Qubit'
        sig_name = '%s:%s' % (name, qubit.id)
        qubits = map(lambda x: cls.format_qubit(
            x)._asdict(), Qubit.get_flying(sig_name))
        # NOTE(review): `qubits` is a map object and therefore always
        # truthy — this early-return guard never fires.
        if not qubits:
            return False
        res = list(map(partial(
            Qubit.entanglement_trigger,
            data=isinstance(data, dict) and data or data._asdict()), qubits))
        return res

    @classmethod
    def entangle(cls, qid1, qid2):
        """Point qid1's entangle link at qubit qid2."""
        sig_name = 'Qubit:%s' % qid2
        return cls.manager.update(qid1, entangle=sig_name)

    @staticmethod
    @partial(period_task, name='spout', period=300)
    @queue.task(filter=task_method)
    def activate_period_task():
        """Periodic task (every 300s): re-activate all spouts."""
        return Qubit.activate_all()

    @classmethod
    def pick_status(cls, qid, ts):
        """Fetch the stored state of *qid* at timestamp *ts*."""
        return States.pick(qid, ts)
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,291 | RyanKung/qubit | refs/heads/master | /qubit/types/function.py | __all__ = ['Function']
class Function(object):
    """Stored, eval()-able function records.

    NOTE(review): methods reference ``cls.manager`` / ``cls.prototype``,
    which are not defined on this class — presumably supplied by a
    subclass following the Qubit pattern — TODO confirm.
    """
    @classmethod
    def create(cls, name, body, side_effect=False, *args, **kwargs):
        """Persist a new function record and return its id."""
        return cls.manager.insert(name=name,
                                  body=body,
                                  side_effect=side_effect,
                                  *args, **kwargs)

    @classmethod
    def format(cls, raw: dict):
        """Wrap a raw row dict in the prototype record; None stays None."""
        if not raw:
            return None
        return cls.prototype(**raw)

    @classmethod
    def get_raw(cls, mid):
        """Fetch the stored record for *mid* without evaluating it."""
        return cls.format(cls.manager.get(mid))

    @classmethod
    def activate(cls, func):
        """Evaluate a stored function body under a restricted __import__."""
        glo = {'__import__': cls.__import__}
        return eval(func.body, glo)

    @classmethod
    def __import__(cls, s: str):
        """Import *s* unless it is blacklisted ('os', 'sys') or empty.

        Fix: the original condition was ``if s or s not in [...]``,
        which is true for every non-empty name, so the blacklist was
        never applied (and the empty string fell through to import).
        """
        if s and s not in ('os', 'sys'):
            return __import__(s)
        else:
            raise NotImplementedError

    @classmethod
    def get(cls, mid):
        """Fetch and evaluate the function stored under *mid*."""
        return cls.activate(cls.get_raw(mid))

    @classmethod
    def get_list(cls, size=100, offset=0, sort_key=''):
        # NOTE(review): size/offset/sort_key are accepted but ignored.
        return cls.manager.get_list()

    @classmethod
    def delete(cls, mid):
        # Consistency fix: every other method uses ``cls.manager``;
        # ``cls.mapper`` is referenced nowhere else and was a typo.
        return cls.manager.delete(id=mid)
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,292 | RyanKung/qubit | refs/heads/master | /qubit/wsgiapp.py | from qubit.io.celery import queue
from qubit.core import app
from qubit.middleware import middleware
import qubit.types as types
import qubit.apis as apis
import qubit.views as views
__all__ = ['app', 'apis', 'queue', 'types', 'views', 'middleware']
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,293 | RyanKung/qubit | refs/heads/master | /qubit/apis/__init__.py | from . import qubit
from . import states
__all__ = ['qubit', 'states']
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,294 | RyanKung/qubit | refs/heads/master | /qubit/core/app.py | import flask
from qubit.config import STATIC_PATH, STATIC_URL
__all__ = ['app']
# The single Flask application instance shared by all views and APIs.
# Static assets are served from the config-driven STATIC_PATH at STATIC_URL.
app = flask.Flask(
    'qubit',
    static_url_path=STATIC_URL,
    static_folder=STATIC_PATH)
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,295 | RyanKung/qubit | refs/heads/master | /qubit/apis/utils.py | from concurrent.futures import TimeoutError
from pulsar import ensure_future
import simplejson as json
from functools import wraps
__all__ = ['jsonize', 'resp_wrapper']
def jsonize(fn):
    """Decorator: serialize the wrapped view's return value to JSON.

    NaN/Inf become null (ignore_nan), namedtuples serialize as objects,
    and anything else non-serializable falls back to str().
    """
    @wraps(fn)
    def wrapped(*args, **kwargs):
        payload = fn(*args, **kwargs)
        return json.dumps(
            payload,
            ensure_ascii=False,
            ignore_nan=True,
            namedtuple_as_object=True,
            default=str,
        )
    return wrapped
def resp_wrapper(fn):
    """Decorator: normalize a view's return value into a response dict.

    Non-dict results are wrapped under 'data'; a TimeoutError from the
    view is treated as an empty result. The response always carries
    result='ok' and status_code='200'.
    """
    @wraps(fn)
    def handler(*args, **kwargs):
        try:
            payload = fn(*args, **kwargs)
        except TimeoutError:
            payload = {}
        if isinstance(payload, dict):
            body = payload
        else:
            body = dict(data=payload)
        return dict(body, result='ok', status_code='200')
    return handler
| {"/qubit/__main__.py": ["/qubit/wsgiapp.py"], "/qubit/types/__init__.py": ["/qubit/types/qubit.py", "/qubit/types/states.py"], "/qubit/apis/states.py": ["/qubit/types/__init__.py", "/qubit/apis/utils.py"], "/qubit/io/celery/config.py": ["/qubit/config.py"], "/tests/apis/__init__.py": ["/qubit/wsgiapp.py"], "/qubit/io/celery/__init__.py": ["/qubit/io/celery/utils.py", "/qubit/io/celery/types.py"], "/qubit/io/postgres/postgres.py": ["/qubit/config.py"], "/qubit/measure/__init__.py": ["/qubit/measure/pandas.py"], "/qubit/io/postgres/__init__.py": ["/qubit/io/postgres/postgres.py", "/qubit/io/postgres/queryset.py"], "/qubit/views/admin.py": ["/qubit/core/app.py"], "/qubit/io/postgres/queryset.py": ["/qubit/io/postgres/__init__.py", "/qubit/io/postgres/postgres.py", "/qubit/utils.py"], "/qubit/types/states.py": ["/qubit/core/utils.py", "/qubit/measure/__init__.py", "/qubit/io/postgres/__init__.py", "/qubit/io/redis/__init__.py", "/qubit/types/utils.py"], "/qubit/io/redis/__init__.py": ["/qubit/config.py"], "/tests/types/test_crud.py": ["/qubit/types/__init__.py"], "/tests/apis/test_curd.py": ["/tests/apis/__init__.py"], "/tests/io/test_db.py": ["/qubit/io/postgres/__init__.py"], "/qubit/__init__.py": ["/qubit/wsgiapp.py"], "/tests/apis/test_cpu.py": ["/tests/apis/__init__.py"], "/tests/__init__.py": ["/qubit/io/postgres/__init__.py", "/schema/utils.py"], "/schema/utils.py": ["/qubit/io/postgres/__init__.py"], "/qubit/apis/qubit.py": ["/qubit/types/__init__.py", "/qubit/types/utils.py", "/qubit/apis/utils.py"], "/qubit/wsgiapp.py": ["/qubit/io/celery/__init__.py", "/qubit/middleware/__init__.py", "/qubit/types/__init__.py", "/qubit/apis/__init__.py", "/qubit/views/__init__.py"], "/qubit/core/app.py": ["/qubit/config.py"]} |
67,305 | LyricLy/capncord.py | refs/heads/master | /capncord/http.py | import asyncio
# Root of the capnchat HTTP API.
BASE_URL = "https://capnchat.tk"


def route(n):
    """Return the absolute URL for the API route path *n*."""
    return BASE_URL + n


async def get(session, route_):
    """GET *route_*; on a 429 rate-limit, wait one second and retry.

    Fix: the retried response was previously discarded and the
    original 429 response returned; the retry result is now returned.
    """
    resp = await session.get(route(route_))
    if resp.status == 429:
        await asyncio.sleep(1)
        return await get(session, route_)
    return resp


async def post(session, route_, *, data=None):
    """POST *data* to *route_*; on a 429 rate-limit, wait and retry.

    Fix: same missing-return bug as ``get`` — the retried response is
    now actually returned.
    """
    resp = await session.post(route(route_), data=data)
    if resp.status == 429:
        await asyncio.sleep(1)
        return await post(session, route_, data=data)
    return resp
| {"/capncord/bot.py": ["/capncord/channel.py", "/capncord/http.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/channel.py": ["/capncord/__init__.py", "/capncord/http.py", "/capncord/user.py"], "/capncord/__init__.py": ["/capncord/bot.py", "/capncord/channel.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/user.py": ["/capncord/http.py"], "/capncord/message.py": ["/capncord/__init__.py", "/capncord/user.py"]} |
67,306 | LyricLy/capncord.py | refs/heads/master | /capncord/bot.py | from .channel import Channel
from .http import get, post
from .user import User
from .message import Message
import asyncio
import json
from collections import defaultdict
import aiohttp
import websockets
class Bot:
    """Client for the capnchat service: HTTP session plus websocket events."""

    def __init__(self):
        self.loop = asyncio.get_event_loop()
        self.session = None   # aiohttp session, created in prep()
        self.user = None      # the bot's own User, resolved in prep()
        # Event name -> list of coroutine callbacks.
        self.listeners = {"message": [self.on_message], "ready": [self.on_ready]}
        self.lengths = defaultdict(int)

    async def on_message(self, message):
        """Default message handler; override in subclasses."""
        pass

    async def on_ready(self):
        """Default ready handler; override in subclasses."""
        pass

    async def wait_for(self, event, *, timeout=None, check=None):
        """Wait until *event* fires (and *check*, if given, passes).

        Bug fixes vs. the original: a missing *check* used to mean the
        future was never resolved at all, the temporary listener was never
        removed after firing (a leak), and a second matching event could
        raise InvalidStateError by setting the result twice.
        """
        fut = asyncio.Future()

        async def listener(*args):
            if fut.done():
                return
            if check is None or check(*args):
                fut.set_result(args[0] if len(args) == 1 else args)

        self.listeners[event].append(listener)
        try:
            return await asyncio.wait_for(fut, timeout=timeout)
        finally:
            self.listeners[event].remove(listener)

    async def check_for_messages(self):
        """Relay incoming websocket payloads as 'message' events."""
        # Spin until prep() has attached the websocket.
        while not hasattr(self, "ws"):
            await asyncio.sleep(0)
        while True:
            msg = json.loads(await self.ws.recv())
            await self.trigger_event("message", Message.from_data(self, msg))

    def get_channel(self, id_):
        """Return a Channel handle for *id_*."""
        return Channel(self, id_)

    async def history(self):
        """Fetch the complete chat log as Message objects."""
        resp = await get(self.session, "/serial_chat")
        data = await resp.json()
        return [Message.from_data(self, msg) for msg in data]

    async def get_message(self, id_):
        """Fetch a single message by id."""
        resp = await post(self.session, "/get_message", data={"id": id_})
        data = await resp.json()
        return Message.from_data(self, data)

    async def trigger_event(self, event, *args):
        """Schedule every listener registered for *event*.

        Iterates a snapshot so listeners that unregister themselves while
        firing (e.g. wait_for) cannot disturb the loop.
        """
        for listener in list(self.listeners[event]):
            self.loop.create_task(listener(*args))

    async def prep(self, token):
        """Open the websocket and HTTP session, then announce readiness."""
        self.ws = await websockets.client.connect("ws://capnchat.tk:8000/ws")
        self.session = aiohttp.ClientSession(headers={"User-Agent": "capn.py", "token": token}, loop=self.loop)
        resp = await get(self.session, "/id")
        data = await resp.json()
        self.user = User(self, data["user_id"])
        await self.trigger_event("ready")

    def run(self, token):
        """Start the bot and block forever."""
        self.loop.create_task(self.check_for_messages())
        self.loop.create_task(self.prep(token))
        self.loop.run_forever()
        # NOTE(review): in current aiohttp, ClientSession.close() is a
        # coroutine and would need to be awaited on the loop — confirm the
        # pinned aiohttp version before changing this call.
        self.session.close()
| {"/capncord/bot.py": ["/capncord/channel.py", "/capncord/http.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/channel.py": ["/capncord/__init__.py", "/capncord/http.py", "/capncord/user.py"], "/capncord/__init__.py": ["/capncord/bot.py", "/capncord/channel.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/user.py": ["/capncord/http.py"], "/capncord/message.py": ["/capncord/__init__.py", "/capncord/user.py"]} |
67,307 | LyricLy/capncord.py | refs/heads/master | /capncord/channel.py | from . import message
from .http import get, post
from .user import User
class Channel:
    """A chat channel, identified by its numeric id."""

    def __init__(self, bot, id_):
        self.bot = bot
        self.id = id_

    async def send(self, content):
        """Post *content* to this channel."""
        payload = {"text": content, "channel": self.id}
        await post(self.bot.session, "/chat", data=payload)

    async def history(self):
        """Fetch the full chat log, keeping only this channel's messages."""
        resp = await get(self.bot.session, "/serial_chat")
        raw = await resp.json()
        return [
            message.Message.from_data(self.bot, entry)
            for entry in raw
            if entry["channel"] == self.id
        ]

    def __eq__(self, other):
        if isinstance(other, Channel):
            return self.id == other.id
        return NotImplemented

    def __hash__(self):
        return self.id
| {"/capncord/bot.py": ["/capncord/channel.py", "/capncord/http.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/channel.py": ["/capncord/__init__.py", "/capncord/http.py", "/capncord/user.py"], "/capncord/__init__.py": ["/capncord/bot.py", "/capncord/channel.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/user.py": ["/capncord/http.py"], "/capncord/message.py": ["/capncord/__init__.py", "/capncord/user.py"]} |
67,308 | LyricLy/capncord.py | refs/heads/master | /capncord/__init__.py | from .bot import Bot
from .channel import Channel
from .user import User
from .message import Message
| {"/capncord/bot.py": ["/capncord/channel.py", "/capncord/http.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/channel.py": ["/capncord/__init__.py", "/capncord/http.py", "/capncord/user.py"], "/capncord/__init__.py": ["/capncord/bot.py", "/capncord/channel.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/user.py": ["/capncord/http.py"], "/capncord/message.py": ["/capncord/__init__.py", "/capncord/user.py"]} |
67,309 | LyricLy/capncord.py | refs/heads/master | /capncord/user.py | from .http import post
class User:
    """A chat user; profile fields are fetched lazily over HTTP."""

    def __init__(self, bot, id_):
        self.bot = bot
        self.id = id_
        self._name = None           # cached profile name
        self._messages_sent = None  # cached message count

    async def update(self):
        """Populate the cached profile fields (no-op once both are set)."""
        if not (self._name is None or self._messages_sent is None):
            return
        resp = await post(self.bot.session, "/get_user", data={"id": self.id})
        data = await resp.json()
        self._name = data["name"]
        self._messages_sent = data["messages_sent"]

    @property
    async def name(self):
        """The user's display name (awaitable; fetched on first access)."""
        await self.update()
        return self._name

    @property
    async def messages_sent(self):
        """How many messages the user has sent (awaitable)."""
        await self.update()
        return self._messages_sent

    def __eq__(self, other):
        if not isinstance(other, User):
            return NotImplemented
        return self.id == other.id

    def __hash__(self):
        # Bug fix: defining __eq__ without __hash__ made User unhashable
        # (Python sets __hash__ to None); hash by id, consistent with
        # __eq__ and with Channel.__hash__.
        return hash(self.id)

    def __str__(self):
        if self._name:
            return self._name
        return repr(self)
| {"/capncord/bot.py": ["/capncord/channel.py", "/capncord/http.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/channel.py": ["/capncord/__init__.py", "/capncord/http.py", "/capncord/user.py"], "/capncord/__init__.py": ["/capncord/bot.py", "/capncord/channel.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/user.py": ["/capncord/http.py"], "/capncord/message.py": ["/capncord/__init__.py", "/capncord/user.py"]} |
67,310 | LyricLy/capncord.py | refs/heads/master | /capncord/message.py | from . import channel
from .user import User
import datetime
class Message:
    """A single chat message and its metadata."""

    def __init__(self, channel, id_, author, content, created_at):
        self.id = id_
        self.channel = channel
        self.author = author
        self.content = content
        self.created_at = created_at

    @classmethod
    def from_data(cls, bot, data):
        """Build a Message from a raw API payload dict."""
        when = datetime.datetime.strptime(
            data["created_at"], "%Y-%m-%d %H:%M:%S.%f")
        return cls(
            channel.Channel(bot, data["channel"]),
            data["message_id"],
            User(bot, data["author_id"]),
            data["content"],
            when,
        )
| {"/capncord/bot.py": ["/capncord/channel.py", "/capncord/http.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/channel.py": ["/capncord/__init__.py", "/capncord/http.py", "/capncord/user.py"], "/capncord/__init__.py": ["/capncord/bot.py", "/capncord/channel.py", "/capncord/user.py", "/capncord/message.py"], "/capncord/user.py": ["/capncord/http.py"], "/capncord/message.py": ["/capncord/__init__.py", "/capncord/user.py"]} |
67,314 | neerajvkdixit/BuddyHubServer | refs/heads/master | /python_modules/mongohelper.py | from pymongo import MongoClient
from bson.json_util import dumps
import urllib.parse
import json
from django.conf import settings
class MongoDao:
    """Thin data-access wrapper around a MongoDB connection.

    Reads connection settings from ``settings.CURRENT_DB_CONF`` and
    exposes simple find/insert helpers that return JSON-safe dicts.
    """
    empCount = 0  # NOTE(review): unused leftover from a tutorial template
    def __init__(self):
        db_conf = settings.CURRENT_DB_CONF
        print(db_conf)
        # Build the connection URL with percent-encoded credentials.
        self.url = 'mongodb://'+urllib.parse.quote_plus(db_conf["USER"])+':'+urllib.parse.quote_plus(db_conf["PASSWORD"]) +'@'+db_conf["DBURL"]
        # NOTE(review): this prints credentials to stdout — consider removing.
        print(self.url)
        self.mongoclient = MongoClient(self.url)
    def findAll(self,collection,condition,projection):
        """Return all documents matching *condition*, shaped by *projection*."""
        db_conf = settings.CURRENT_DB_CONF
        collectionobj = self.mongoclient[db_conf["DBNAME"]][collection]
        res = collectionobj.find(condition,projection)
        # Round-trip through bson's dumps to get plain serializable dicts.
        return json.loads(dumps(res))
    def findByKey(self,collection,key,val,outputjson=True):
        """Return the first document where *key* == *val*.

        With ``outputjson=False`` the raw pymongo document is returned
        instead of a JSON-safe dict.
        """
        db_conf = settings.CURRENT_DB_CONF
        collectionobj = self.mongoclient[db_conf["DBNAME"]][collection]
        res = collectionobj.find_one({key : val})
        if(outputjson == False):
            return res
        return json.loads(dumps(res))
    def insertpo(self,collection,mongopo):
        """Insert or replace *mongopo*; returns the stored document's id.

        NOTE(review): ``Collection.save`` is deprecated in pymongo 3 —
        confirm the pinned version before migrating to insert_one/replace_one.
        """
        db_conf = settings.CURRENT_DB_CONF
        collectionobj = self.mongoclient[db_conf["DBNAME"]][collection]
        return collectionobj.save(mongopo)
| {"/BuddyHubServer/views.py": ["/python_modules/mongohelper.py"]} |
67,315 | neerajvkdixit/BuddyHubServer | refs/heads/master | /BuddyHubServer/views.py | from django.http import HttpResponse
from django.http import JsonResponse
from pymongo import MongoClient
import urllib.parse
from bson.json_util import dumps
import json
from django.views.decorators.csrf import csrf_exempt
import hashlib
from python_modules.mongohelper import MongoDao
from django.conf import settings
def fetchdata_getcitylist(request):
    """Return every city (name + url) and the default city as JSON."""
    projection = {"name": True, "url": True, "_id": False}
    dao = MongoDao()
    db_conf = settings.CURRENT_DB_CONF
    cities = dao.findAll(db_conf["CITY_TABLE"], {}, projection)
    response_data = {
        'result': 'success',
        'data': {"cities": cities, "default": "Noida"},
    }
    return JsonResponse(response_data)
def checkPODTOAndUpdate(po, dto):
    """Merge updatable fields from the incoming *dto* into the stored *po*.

    Mutates *po* in place. "description" and "price" are overwritten when
    they differ (or are newly present); "imageUrl" is treated as a list and
    any unseen URLs are appended.

    Bug fix: the original raised KeyError when "imageUrl" was missing from
    either dict; missing keys are now handled gracefully.

    Returns a dict: {"ISUPDATED": bool, "PO": po}.
    """
    keys_to_compare = ["description", "price", "imageUrl"]
    to_return = {"ISUPDATED": False, "PO": po}
    for key in keys_to_compare:
        if key in ["imageUrl"]:
            # List-valued field: append image URLs we have not seen yet.
            if key not in dto:
                continue
            existing = po.setdefault(key, [])
            for nestedKeyVal in dto[key]:
                if nestedKeyVal not in existing:
                    existing.append(nestedKeyVal)
                    to_return["ISUPDATED"] = True
        else:
            if key in po and key in dto and po[key] != dto[key]:
                po[key] = dto[key]
                to_return["ISUPDATED"] = True
            elif key not in po and key in dto:
                po[key] = dto[key]
                to_return["ISUPDATED"] = True
    return to_return
@csrf_exempt
def post_propertydata(request):
    """Ingest a scraped property listing posted as JSON.

    Inserts a brand-new property keyed by a dedup hash, or merges changed
    fields into the existing record; unchanged listings are rejected with
    an error. (Removed the unused ``prop_dto_keys``/``ischanged`` locals.)
    """
    response_data = {}
    response_data['result'] = 'success'
    response_data['data'] = {}
    request_body = request.body
    request_body = request_body.decode("utf-8")
    prop_dto = json.loads(request_body)
    # Only accept payloads explicitly flagged as property data.
    if("property" not in prop_dto.keys() or prop_dto["property"] != 1):
        response_data['result'] = 'error'
        response_data['msg'] = 'input data is not property data'
        return JsonResponse(response_data)
    prop_key = gethashkeyofprop(prop_dto)
    mongodao = MongoDao()
    db_conf = settings.CURRENT_DB_CONF
    prop_po = mongodao.findByKey(db_conf["PROP_TABLE"],"prop_key",prop_key,False)
    if(prop_po is None):
        # First sighting: store the DTO itself, tagged with the dedup key.
        prop_dto["prop_key"] = prop_key
        prop_po = prop_dto
    else:
        # Known property: merge changed fields; bail out if nothing changed.
        is_updated_res = checkPODTOAndUpdate(prop_po , prop_dto)
        if(is_updated_res["ISUPDATED"] == False):
            response_data['result'] = 'error'
            response_data['msg'] = 'property exist'
            return JsonResponse(response_data)
        prop_po = is_updated_res["PO"]
    # Any insert/update resets the manual screening status.
    prop_po["screening"] = 0
    res = mongodao.insertpo(db_conf["PROP_TABLE"],prop_po)
    response_data["message"] = "property updated with id is =>"+str(res)
    return JsonResponse(response_data)
def gethashkeyofprop(prop_json):
    """Derive a stable dedup key for a property listing.

    The key is the concatenation of the first 10 hex chars of the MD5 of
    the lister's profile URL, the title and the locality (each defaulting
    to the empty string when absent).

    Bug fix: the original computed each hash only inside its ``if`` block,
    so a listing missing "user", "title" or "locality" raised NameError at
    the final concatenation; the hashes are now always computed from the
    defaulted values.
    """
    user_profile_link = ""
    title = ""
    locality = ""
    if("user" in prop_json.keys() and "userProfileUrl" in prop_json["user"]):
        user_profile_link = prop_json["user"]["userProfileUrl"]
    if("title" in prop_json.keys()):
        title = prop_json["title"]
    if("locality" in prop_json.keys()):
        locality = prop_json["locality"]

    def _short_md5(text):
        # MD5 is used only as a fingerprint here, not for security.
        return hashlib.md5(text.encode()).hexdigest()[:10]

    keyhash = _short_md5(user_profile_link) + _short_md5(title) + _short_md5(locality)
    return keyhash
| {"/BuddyHubServer/views.py": ["/python_modules/mongohelper.py"]} |
67,317 | benmao/june | refs/heads/master | /june/app.py | #!/usr/bin/env python
import os

# Run everything in UTC and point the egg cache at a writable tmp dir.
os.environ['TZ'] = 'UTC'
os.environ["PYTHON_EGG_CACHE"] = "/tmp/egg"

# Project directory (june/) and its parent (the checkout root).
PROJDIR = os.path.abspath(os.path.dirname(__file__))
ROOTDIR = os.path.split(PROJDIR)[0]

try:
    import june
    print('Start june version: %s' % june.__version__)
except ImportError:
    # Not installed: add the checkout root to sys.path (development mode).
    import site
    site.addsitedir(ROOTDIR)
    print('Development of june')
import tornado.options
import tornado.locale
from tornado.options import define, options
from tornado.ioloop import IOLoop
from tornado.httpserver import HTTPServer
from tornado import web
from june.lib.util import parse_config_file
# server config: port, debug flag, database DSNs and memcache address
define('port', 8000)
define('debug', True)
define('master', "sqlite:////tmp/june.sqlite")
define('slaves', '')
define('memcache', "127.0.0.1:11211")
# site config: branding, template/static paths, locale and secrets
define('sitename', 'June')
define('siteurl', 'http://lepture.com/project/june')
define('sitefeed', '/feed')
define('password_secret', '') # reset it
define('static_path', os.path.join(PROJDIR, 'static'))
define('static_url_prefix', "/static/")
define('login_url', "/account/signin")
define('template_path', os.path.join(PROJDIR, "templates"))
define('dashboard_template_path',
       os.path.join(PROJDIR, "dashboard", "templates"))
define('locale_path', os.path.join(PROJDIR, 'locale'))
define('default_locale', 'en_US')
define('xsrf_cookies', True)
define('cookie_secret', '') # reset it
# factor config: weights and per-user limits for reply/vote scoring
define('reply_factor_for_topic', 600)
define('reply_time_factor', 1000)
define('up_factor_for_topic', 1500)
define('up_factor_for_user', 1)
define('down_factor_for_topic', 800)
define('down_factor_for_user', 1)
define('accept_reply_factor_for_user', 1)
define('up_max_for_user', 10)
define('down_max_for_user', 4)
define('vote_max_for_user', 4)
define('promote_topic_cost', 100)
# third party support config: gravatar, recaptcha, emoji, analytics
define('gravatar_base_url', "http://www.gravatar.com/avatar/")
define('gravatar_extra', '')
define('recaptcha_key', '')
define('recaptcha_secret', '')
define('recaptcha_theme', 'clean')
define('emoji_url', '')
define('ga', '') # google analytics
define('gcse', '') # google custom search
# image backend implementation (dotted import path)
define('backend', 'june.backend.local.LocalBackend')
class Application(web.Application):
    """Tornado application wired up with june's handlers, DB and cache."""

    def __init__(self):
        from june.config import db, cache # init db
        from june.urls import handlers, ui_modules
        settings = dict(
            debug=options.debug,
            autoescape=None,
            cookie_secret=options.cookie_secret,
            xsrf_cookies=options.xsrf_cookies,
            login_url=options.login_url,
            template_path=options.template_path,
            static_path=options.static_path,
            static_url_prefix=options.static_url_prefix,
            ui_modules=ui_modules,
        )
        super(Application, self).__init__(handlers, **settings)
        # NOTE(review): db/cache are set on the *class*, so every
        # Application instance shares the same session and cache client.
        Application.db = db.session
        Application.cache = cache
        tornado.locale.load_translations(options.locale_path)
        tornado.locale.set_default_locale(options.default_locale)
def run_server():
    """Parse command-line and config-file options, then serve forever."""
    define('settings', '')
    tornado.options.parse_command_line()
    parse_config_file(options.settings)
    app = Application()
    server = HTTPServer(app, xheaders=True)
    server.listen(int(options.port))
    IOLoop.instance().start()
# Script entry point: start the HTTP server.
if __name__ == "__main__":
    run_server()
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,318 | benmao/june | refs/heads/master | /june/handlers/node.py | import datetime
import tornado.web
from june.lib.handler import BaseHandler
from june.lib.util import PageMixin
from june.models import Topic, FollowNode
from june.models.mixin import NodeMixin, MemberMixin
class NodeHandler(BaseHandler, NodeMixin, PageMixin):
    """Show a single node's page, looked up by its slug."""

    def head(self, slug):
        pass

    def get(self, slug):
        node = self.get_node_by_slug(slug)
        if not node:
            self.send_error(404)
            return
        # Only signed-in users can be following a node.
        if self.current_user:
            following = self.is_user_follow_node(
                self.current_user.id, node.id)
        else:
            following = False
        self.render('node.html', node=node, is_following=following)
class FollowNodeHandler(BaseHandler, NodeMixin):
    """Subscribe the signed-in user to a node, then bounce back to it."""

    @tornado.web.authenticated
    def get(self, slug):
        node = self.get_node_by_slug(slug)
        if not node:
            self.send_error(404)
            return
        self.follow_node(node.id)
        self.db.commit()
        # Drop every cache entry that embeds this user's followed nodes.
        uid = self.current_user.id
        stale_keys = [
            'TopicListModule:%s:1:-impact' % uid,
            'follownode:%s' % uid,
            'FollowedNodesModule:%s' % uid,
        ]
        self.cache.delete_multi(stale_keys)
        self.redirect('/node/%s' % node.slug)
class UnfollowNodeHandler(BaseHandler, NodeMixin):
    """Remove the signed-in user's follow of a node."""

    @tornado.web.authenticated
    def get(self, slug):
        node = self.get_node_by_slug(slug)
        if not node:
            self.send_error(404)
            return
        # NOTE(review): raw SQL built by %-interpolation; both ids come
        # from the session/DB (ints), but a parameterized query would be
        # safer and consistent with the ORM used elsewhere.
        sql = 'delete from follownode where user_id=%s and node_id=%s' % \
            (self.current_user.id, node.id)
        self.db.execute(sql)
        self.db.commit()
        # Invalidate caches that embed this user's followed-node set.
        key1 = 'TopicListModule:%s:1:-impact' % self.current_user.id
        key2 = 'follownode:%s' % self.current_user.id
        key3 = 'FollowedNodesModule:%s' % self.current_user.id
        self.cache.delete_multi([key1, key2, key3])
        self.redirect('/node/%s' % node.slug)
class NodeListHandler(BaseHandler, NodeMixin):
    """List every node on the site."""

    def head(self):
        pass

    def get(self):
        self.render('node_list.html', nodes=self.get_all_nodes())
class NodeFeedHandler(BaseHandler, NodeMixin):
    """Render a node's 20 newest topics as an XML feed (cached for 1h)."""

    def get(self, slug):
        self.set_header('Content-Type', 'text/xml; charset=utf-8')
        node = self.get_node_by_slug(slug)
        if not node:
            self.send_error(404)
            return
        # Serve the cached rendering when available.
        html = self.cache.get('nodefeed:%s' % str(slug))
        if html is not None:
            self.write(html)
            return
        topics = Topic.query.filter_by(node_id=node.id).order_by('-id')[:20]
        user_ids = (topic.user_id for topic in topics)
        users = self.get_users(user_ids)
        now = datetime.datetime.utcnow()
        html = self.render_string('feed.xml', topics=topics, users=users,
                                  node=node, now=now)
        self.cache.set('nodefeed:%s' % str(slug), html, 3600)
        self.write(html)
# URL routes exported by this module (collected in june.urls).
handlers = [
    ('/nodes', NodeListHandler),
    ('/node/(\w+)', NodeHandler),
    ('/node/(\w+)/follow', FollowNodeHandler),
    ('/node/(\w+)/unfollow', UnfollowNodeHandler),
    ('/node/(\w+)/feed', NodeFeedHandler),
]
class FollowedNodesModule(tornado.web.UIModule, NodeMixin):
    """Sidebar module listing the nodes a user follows (cached 10 min)."""

    def render(self, user_id, tpl="module/node_list.html"):
        #TODO: node_list tpl
        key = 'FollowedNodesModule:%s' % str(user_id)
        html = self.handler.cache.get(key)
        if html is not None:
            return html
        node_ids = self.get_user_follow_nodes(user_id)
        if not node_ids:
            return ''
        nodes = self.get_nodes(node_ids)
        # NOTE(review): dict.itervalues() exists only on Python 2.
        html = self.render_string(tpl, nodes=nodes.itervalues())
        self.handler.cache.set(key, html, 600)
        return html
class NodeFollowersModule(tornado.web.UIModule, MemberMixin):
    """Module showing up to five followers of a node (cached 10 min)."""

    def render(self, node_id, tpl='module/member_list.html'):
        key = 'NodeFollowersModule:%s' % node_id
        html = self.handler.cache.get(key)
        if html is not None:
            return html
        q = FollowNode.query.filter_by(node_id=node_id).limit(5)\
            .values('user_id')
        #TODO limit number
        user_ids = (values[0] for values in q)
        users = self.get_users(user_ids)
        # NOTE(review): dict.itervalues() exists only on Python 2.
        html = self.render_string(tpl, users=users.itervalues())
        self.handler.cache.set(key, html, 600)
        return html
# UI modules exported by this module (collected in june.urls).
ui_modules = {
    'FollowedNodesModule': FollowedNodesModule,
    'NodeFollowersModule': NodeFollowersModule,
}
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,319 | benmao/june | refs/heads/master | /june/handlers/account.py | import tornado.web
from tornado.auth import GoogleMixin
from datetime import datetime
from june.lib.handler import BaseHandler
from june.lib import validators
from june.lib.util import ObjectDict, PageMixin
from june.lib.recaptcha import RecaptchaMixin
from june.models import Member, MemberLog, Notify, create_token
from june.models.mixin import NodeMixin, MemberMixin
from june.social import services
class SigninHandler(BaseHandler):
    """Sign a member in with username-or-email plus password."""

    def head(self):
        pass

    def get(self):
        # Already signed in: nothing to do.
        if self.current_user:
            self.redirect(self.next_url)
            return
        self.render('signin.html')

    def post(self):
        account = self.get_argument('account', None)
        password = self.get_argument('password', None)
        if not (account and password):
            self.create_message('Form Error', 'Please fill the required field')
            self.render('signin.html')
            return
        # An '@' means the user typed an email address, else a username.
        if '@' in account:
            user = Member.query.filter_by(email=account).first()
        else:
            user = Member.query.filter_by(username=account).first()
        if user and user.check_password(password):
            # Cookie format "<id>/<token>"; see BaseHandler.get_current_user.
            self.set_secure_cookie('user', '%s/%s' % (user.id, user.token))
            self.redirect(self.next_url)
            self.create_log(user.id)
            return
        self.create_message('Form Error', "Invalid account or password")
        self.render('signin.html')

    def create_log(self, user_id):
        """Record a successful sign-in with the client IP."""
        ip = self.request.remote_ip
        log = MemberLog(user_id=user_id, ip=ip, message='Signin')
        self.db.add(log)
        self.db.commit()
class GoogleSigninHandler(BaseHandler, GoogleMixin):
    """Sign in via Google OpenID; creates the account on first login."""

    @tornado.web.asynchronous
    def get(self):
        if self.current_user:
            self.redirect(self.next_url)
            return
        if self.get_argument("openid.mode", None):
            # Returning from Google with an auth response.
            self.get_authenticated_user(self.async_callback(self._on_auth))
            return
        self.authenticate_redirect(ax_attrs=["email"])

    def _on_auth(self, user):
        """Auth callback: provision a member for unknown emails, sign in."""
        if not user:
            raise tornado.web.HTTPError(500, "Google auth failed")
        email = user["email"].lower()
        user = Member.query.filter_by(email=email).first()
        if not user:
            # First login: create the member; '!' marks "no local password".
            user = self.create_user(email)
            user.password = '!'
            self.db.add(user)
            self.db.commit()
            self.cache.delete('status')
        self.set_secure_cookie('user', '%s/%s' % (user.id, user.token))
        self.create_log(user.id)
        self.redirect(self.next_url)

    def create_log(self, user_id):
        """Record a Google sign-in with the client IP."""
        ip = self.request.remote_ip
        log = MemberLog(user_id=user_id, ip=ip, message='Google signin')
        self.db.add(log)
        self.db.commit()
class SignoutHandler(BaseHandler):
    """Clear the session cookie for this browser only."""

    def get(self):
        self.clear_cookie('user')
        self.redirect(self.next_url)
class SignoutEverywhereHandler(BaseHandler):
    """Invalidate every session by rotating the member's token."""

    @tornado.web.authenticated
    def get(self):
        user = self.db.query(Member).get(self.current_user.id)
        # A new token makes every previously issued cookie invalid.
        user.token = create_token(16)
        self.db.add(user)
        self.db.commit()
        self.cache.delete('member:%s' % user.id)
        self.redirect(self.next_url)
class SignupHandler(BaseHandler, RecaptchaMixin):
    """Register a new member (email + password, guarded by reCAPTCHA)."""

    def head(self):
        pass

    def get(self):
        if self.current_user:
            return self.redirect(self.next_url)
        recaptcha = self.recaptcha_render()
        self.render('signup.html', email='', recaptcha=recaptcha)

    @tornado.web.asynchronous
    def post(self):
        """Validate the form fields, then verify the captcha asynchronously."""
        email = self.get_argument('email', None)
        password1 = self.get_argument('password1', None)
        password2 = self.get_argument('password2', None)
        if not (email and password1 and password2):
            self.create_message('Form Error', 'Please fill the required field')
            recaptcha = self.recaptcha_render()
            self.render('signup.html', email=email, recaptcha=recaptcha)
            return
        # Collect all validation messages before re-rendering.
        validate = True
        if not validators.email(email):
            validate = False
            self.create_message('Form Error', 'Not a valid email address')
        if password1 != password2:
            validate = False
            self.create_message('Form Error', "Password doesn't match")
        if not validate:
            recaptcha = self.recaptcha_render()
            self.render('signup.html', email=email, recaptcha=recaptcha)
            return
        member = Member.query.filter_by(email=email).first()
        if member:
            self.create_message('Form Error',
                                "This email is already registered")
            recaptcha = self.recaptcha_render()
            self.render('signup.html', email=email, recaptcha=recaptcha)
            return
        self.recaptcha_validate(self._on_validate)

    def _on_validate(self, response):
        """Captcha callback: create the account when the captcha passed."""
        email = self.get_argument('email', None)
        password = self.get_argument('password1', None)
        if not response:
            self.create_message('Form Error', 'Captcha not valid')
            recaptcha = self.recaptcha_render()
            self.render('signup.html', email=email, recaptcha=recaptcha)
            return
        user = self.create_user(email)
        user.password = user.create_password(password)
        self.db.add(user)
        self.db.commit()
        self.cache.delete('status')
        self.set_secure_cookie('user', '%s/%s' % (user.id, user.token))
        return self.redirect(self.next_url)
class SettingHandler(BaseHandler):
    """Let the signed-in member edit username/website; shows recent logins."""

    @tornado.web.authenticated
    def get(self):
        self._setting_render()

    def _setting_render(self):
        """Render the settings page with the last five account log entries."""
        q = MemberLog.query.filter_by(user_id=self.current_user.id)
        logs = q.order_by('-id').limit(5)
        networks = self.get_user_social(self.current_user.id)
        self.render('setting.html', logs=logs,
                    services=services, networks=networks)

    @tornado.web.authenticated
    def post(self):
        username = self.get_argument('username', None)
        website = self.get_argument('website', None)
        if not username:
            self.create_message('Form Error', 'Please fill the required field')
            self._setting_render()
            return
        if not validators.username(username):
            self.create_message('Form Error',
                                "Username not valid, don't be evil")
            self._setting_render()
            return
        if website and not validators.url(website):
            self.create_message('Form Error',
                                "Website not valid, don't be evil")
            self._setting_render()
            return
        # Reject usernames already held by a different member.
        user = self.get_user_by_name(username)
        if user and user.id != self.current_user.id:
            self.create_message('Form Error',
                                "Username is registered by other member")
            self._setting_render()
            return
        user = self.db.query(Member).filter_by(id=self.current_user.id).first()
        user.username = username
        user.website = website
        self.db.add(user)
        self.create_log(user.id)
        self.db.commit()
        # Both the id- and the username-keyed cache entries are now stale.
        self.cache.delete_multi([user.id, user.username], key_prefix='member:')
        self.redirect('/account/setting')

    def create_log(self, user_id):
        """Queue an 'Edit account' log entry (committed by the caller)."""
        ip = self.request.remote_ip
        log = MemberLog(user_id=user_id, ip=ip, message='Edit account')
        self.db.add(log)
class NotifyHandler(BaseHandler):
    """Show the member's latest notifications and mark them as seen."""

    @tornado.web.authenticated
    def get(self):
        user = self.db.query(Member).get(self.current_user.id)
        notify = Notify.query.filter_by(receiver=user.id).order_by('-id')[:20]
        # Render first so the page reflects the pre-visit unread state.
        self.render('notify.html', notify=notify)
        key1 = 'notify:%s' % user.id
        key2 = 'member:%s' % user.id
        self.cache.delete_multi([key1, key2])
        # Advance the watermark used by MemberModule's unread count.
        user.last_notify = datetime.utcnow()
        self.db.add(user)
        self.db.commit()
class MemberHandler(BaseHandler, NodeMixin):
    """Public profile page for a member, looked up by username."""

    def head(self, name):
        pass

    def get(self, name):
        member = self.get_user_by_name(name)
        if not member:
            self.send_error(404)
            return
        self.render('member.html', user=member)
class MemberListHandler(BaseHandler):
    """Member directory; the actual list is rendered by MemberListModule."""

    @tornado.web.authenticated
    def get(self):
        self.render('member_list.html')
# URL routes exported by this module (collected in june.urls).
handlers = [
    ('/account/signin', SigninHandler),
    ('/account/signin/google', GoogleSigninHandler),
    ('/account/signout', SignoutHandler),
    ('/account/signout/everywhere', SignoutEverywhereHandler),
    ('/account/signup', SignupHandler),
    ('/account/setting', SettingHandler),
    ('/account/notify', NotifyHandler),
    ('/member/(\w+)', MemberHandler),
    ('/members', MemberListHandler),
]
class MemberModule(tornado.web.UIModule, MemberMixin):
    """Render a member badge with their unread-notification count."""

    def render(self, user, tpl='module/member.html'):
        key = 'notify:%s' % user.id
        notify = self.handler.cache.get(key)
        if notify is None:
            # Count notifications newer than the member's last visit.
            q = Notify.query.filter_by(receiver=user.id)
            notify = q.filter_by(created__gt=user.last_notify).count()
            self.handler.cache.set(key, notify, 600)
        html = self.render_string(tpl, user=user, notify=notify)
        return html
class MemberListModule(tornado.web.UIModule, PageMixin):
    """Paginated member list ordered by reputation (cached per page)."""

    def render(self, tpl='module/member_list.html'):
        p = self._get_page()
        key = 'MemberListModule:%s' % p
        html = self.handler.cache.get(key)
        if html is not None:
            return html
        page = self._get_pagination(
            Member.query.order_by('-reputation'),
            perpage=30)
        page = ObjectDict(page)
        html = self.render_string(tpl, page=page)
        self.handler.cache.set(key, html, 600)
        return html
# UI modules exported by this module (collected in june.urls).
ui_modules = {
    'MemberModule': MemberModule,
    'MemberListModule': MemberListModule,
}
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,320 | benmao/june | refs/heads/master | /june/lib/handler.py | import re
import datetime
import logging
from tornado.web import RequestHandler
from tornado.options import options
from tornado import escape
import june
from june.models.mixin import MemberMixin, StorageMixin
from june.lib.filters import safe_markdown, xmldatetime, topiclink
from june.lib.util import ObjectDict
class BaseHandler(RequestHandler, MemberMixin, StorageMixin):
    """Shared base for june request handlers.

    Adds DB/cache access, JSON(P) responses, cookie-based auth, template
    context/filters and user-agent sniffing on top of tornado's
    RequestHandler.
    """
    # Process-wide flag so the first request is logged exactly once.
    _first_run = True

    def initialize(self):
        if BaseHandler._first_run:
            logging.info('First Run')
            BaseHandler._first_run = False

    def finish(self, chunk=None):
        super(BaseHandler, self).finish(chunk)
        # On a 500, flush the session so the connection stays usable.
        # NOTE(review): the bare except swallows the real error, and the
        # finally clause commits *again* even after a rollback — confirm
        # this double commit is intentional.
        if self.get_status() == 500:
            try:
                self.db.commit()
            except:
                self.db.rollback()
            finally:
                self.db.commit()

    @property
    def db(self):
        # SQLAlchemy session, shared via an Application class attribute.
        return self.application.db

    @property
    def cache(self):
        # Cache client, shared via an Application class attribute.
        return self.application.cache

    def prepare(self):
        self._prepare_context()
        self._prepare_filters()

    def render_string(self, template_name, **kwargs):
        """Inject the template filters and the per-request context dict."""
        kwargs.update(self._filters)
        assert "context" not in kwargs, "context is a reserved keyword."
        kwargs["context"] = self._context
        return super(BaseHandler, self).render_string(template_name, **kwargs)

    def write(self, chunk):
        """Write *chunk*; dicts are serialized as JSON, or as JSONP when a
        ``callback`` query argument is present."""
        if isinstance(chunk, dict):
            chunk = escape.json_encode(chunk)
            callback = self.get_argument('callback', None)
            if callback:
                # JSONP wrapping for cross-domain consumers.
                chunk = "%s(%s)" % (callback, escape.to_unicode(chunk))
                self.set_header("Content-Type",
                                "application/javascript; charset=UTF-8")
            else:
                self.set_header("Content-Type",
                                "application/json; charset=UTF-8")
        super(BaseHandler, self).write(chunk)

    def get_current_user(self):
        """Resolve the signed-in member from the 'user' secure cookie.

        Cookie payload is "<id>/<token>"; the token must match the stored
        member token (rotated by sign-out-everywhere).
        """
        cookie = self.get_secure_cookie("user")
        if not cookie:
            return None
        try:
            id, token = cookie.split('/')
            id = int(id)
        except:
            # Malformed cookie: drop it.
            self.clear_cookie("user")
            return None
        user = self.get_user_by_id(id)
        if not user:
            return None
        if token == user.token:
            return user
        # Token mismatch (stale session): clear the cookie.
        self.clear_cookie("user")
        return None

    def is_owner_of(self, model):
        """True when the signed-in member owns *model* (via its user_id)."""
        if not hasattr(model, 'user_id'):
            return False
        if not self.current_user:
            return False
        return model.user_id == self.current_user.id

    @property
    def next_url(self):
        # Redirect target after auth actions; defaults to the front page.
        next_url = self.get_argument("next", None)
        return next_url or '/'

    def _prepare_context(self):
        """Build the per-request template context from options and storage."""
        self._context = ObjectDict()
        self._context.now = datetime.datetime.utcnow()
        self._context.version = june.__version__
        self._context.sitename = options.sitename
        self._context.siteurl = options.siteurl
        self._context.sitefeed = options.sitefeed
        self._context.sidebar = self.get_storage('sidebar')
        self._context.footer = self.get_storage('footer')
        self._context.header = self.get_storage('header')
        self._context.ga = options.ga
        self._context.gcse = options.gcse
        self._context.debug = options.debug
        self._context.message = []

    def _prepare_filters(self):
        """Expose template helper functions as filters."""
        self._filters = ObjectDict()
        self._filters.markdown = safe_markdown
        self._filters.xmldatetime = xmldatetime
        self._filters.topiclink = topiclink
        self._filters.get_user = self.get_user_by_id
        self._filters.is_mobile = self.is_mobile

    def create_message(self, header, body):
        """Queue a flash-style message for the current render."""
        msg = ObjectDict(header=header, body=body)
        self._context.message.append(msg)

    def is_mobile(self):
        """Heuristic user-agent sniff for mobile browsers."""
        _mobile = (r'ipod|iphone|android|blackberry|palm|nokia|symbian|'
                   r'samsung|psp|kindle|phone|mobile|ucweb|opera mini|fennec|'
                   r'webos')
        return True if re.search(_mobile, self.user_agent.lower()) else False

    def is_spider(self):
        """Heuristic user-agent sniff for crawlers."""
        _spider = r'bot|crawl|spider|slurp|search|lycos|robozilla|fetcher'
        return True if re.search(_spider, self.user_agent.lower()) else False

    def is_ajax(self):
        return "XMLHttpRequest" == self.request.headers.get("X-Requested-With")

    @property
    def user_agent(self):
        # Default to "bot" so UA sniffing never sees None.
        return self.request.headers.get("User-Agent", "bot")
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,321 | benmao/june | refs/heads/master | /june/config.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import memcache
from tornado.options import options
from june.lib.database import SQLAlchemy
# Build the shared database handle and memcache client from tornado
# options.  MySQL connections get pool_recycle so pooled connections do
# not outlive the server's idle timeout.
master = options.master
slaves = options.slaves.split()
if master.startswith('mysql'):
    db = SQLAlchemy(master, slaves, pool_recycle=3600, echo=options.debug)
else:
    db = SQLAlchemy(master, slaves, echo=options.debug)
cache = memcache.Client(options.memcache.split(), debug=options.debug)
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,322 | benmao/june | refs/heads/master | /june/handlers/front.py | import datetime
import hashlib
import tornado.web
from tornado.util import import_object
from tornado.options import options
from june.lib.handler import BaseHandler
from june.lib.filters import safe_markdown
from june.lib.decorators import require_user
from june.models import Topic, Member, Node, Reply
from june.models.mixin import NodeMixin
class HomeHandler(BaseHandler, NodeMixin):
    """Site front page."""
    def head(self):
        # Allow HEAD probes without doing any rendering work.
        pass
    def get(self):
        # Show the five most recently joined members.
        recent = Member.query.order_by('-id').all()
        self.render('home.html', members=recent[:5])
class SubscriptionHandler(BaseHandler, NodeMixin):
    """Logged-in user's subscription page (content is template-driven)."""
    @tornado.web.authenticated
    def get(self):
        self.render('subscription.html')
class SiteFeedHandler(BaseHandler):
    """Site-wide XML feed of the 20 newest topics, cached for 30 min."""
    def get(self):
        self.set_header('Content-Type', 'text/xml; charset=utf-8')
        html = self.cache.get('sitefeed')
        if html is not None:
            # Serve the cached rendering.
            self.write(html)
            return
        topics = Topic.query.order_by('-id')[:20]
        # A generator is fine here: get_users() converts it to a set.
        user_ids = (topic.user_id for topic in topics)
        users = self.get_users(user_ids)
        now = datetime.datetime.utcnow()
        html = self.render_string('feed.xml', topics=topics, users=users,
                                  node=None, now=now)
        self.cache.set('sitefeed', html, 1800)  # 30 minutes
        self.write(html)
class PreviewHandler(BaseHandler):
    """Markdown preview endpoint: returns sanitized rendered HTML."""
    def post(self):
        text = self.get_argument('text', '')
        self.write(safe_markdown(text))
class SearchHandler(BaseHandler):
    """Search page; only passes the raw query through to the template."""
    def get(self):
        query = self.get_argument('q', '')
        self.render('search.html', query=query)
class UploadHandler(BaseHandler):
    """Asynchronous image upload endpoint.

    Accepts a multipart "image" file (PNG or JPEG only), stores it under
    an md5-derived filename via the configured backend, and replies with
    a small JSON status object.
    """
    @require_user
    @tornado.web.asynchronous
    def post(self):
        image = self.request.files.get('image', None)
        if not image:
            self.write('{"stat": "fail", "msg": "no image"}')
            # @asynchronous disables tornado's auto-finish, so every
            # early exit must finish explicitly or the client hangs.
            self.finish()
            return
        image = image[0]
        content_type = image.get('content_type', '')
        if content_type not in ('image/png', 'image/jpeg'):
            self.write('{"stat": "fail", "msg": "filetype not supported"}')
            self.finish()
            return
        body = image.get('body', '')
        # Content-addressed name: identical uploads map to the same file.
        filename = hashlib.md5(body).hexdigest()
        if content_type == 'image/png':
            filename += '.png'
        else:
            filename += '.jpg'
        backend = import_object(options.backend)()
        backend.save(body, filename, self._on_post)
    def _on_post(self, result):
        """Backend callback: report the stored URL or a server error,
        then finish the async request."""
        if result:
            self.write('{"stat":"ok", "url":"%s"}' % result)
        else:
            self.write('{"stat":"fail", "msg": "server error"}')
        self.finish()
# URL routes exported by this module.
handlers = [
    ('/', HomeHandler),
    ('/subscription', SubscriptionHandler),
    ('/preview', PreviewHandler),
    ('/feed', SiteFeedHandler),
    ('/search', SearchHandler),
    ('/upload', UploadHandler),
]
class SystemStatusModule(tornado.web.UIModule):
    """UI module rendering site-wide object counts, cached for 10 min."""
    def render(self, tpl="module/status.html"):
        status = self.handler.cache.get('status')
        if status is None:
            # Cache miss: count everything in one pass and store it.
            status = {
                'node': Node.query.count(),
                'topic': Topic.query.count(),
                'member': Member.query.count(),
                'reply': Reply.query.count(),
            }
            self.handler.cache.set('status', status, 600)
        return self.render_string(tpl, status=status)
# UI modules exported by this module.
ui_modules = {
    'SystemStatusModule': SystemStatusModule,
}
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,323 | benmao/june | refs/heads/master | /june/dashboard/handlers.py | from tornado.options import options
from june.lib.util import ObjectDict
from june.lib.handler import BaseHandler
from june.lib.decorators import require_admin
from june.models import Topic, Member, Node, Reply
from june.models.mixin import NodeMixin, TopicMixin
class DashMixin(object):
    """Helpers shared by dashboard handlers."""
    def update_model(self, model, attr, required=False):
        """Copy the request argument *attr* onto *model*.

        A required attribute is only overwritten by a non-empty value;
        an optional one is always overwritten (so an empty submission
        clears it).
        """
        value = self.get_argument(attr, '')
        if not required or value:
            setattr(model, attr, value)
class DashHandler(BaseHandler):
    """Base handler for the dashboard: templates live in their own dir."""
    def get_template_path(self):
        return options.dashboard_template_path
class EditStorage(DashHandler):
    """Save the site-wide header/sidebar/footer snippets."""
    @require_admin
    def post(self):
        self.set_storage('header', self.get_argument('header', ''))
        self.set_storage('sidebar', self.get_argument('sidebar', ''))
        self.set_storage('footer', self.get_argument('footer', ''))
        # set_storage only stages the rows; commit them all at once.
        self.db.commit()
        self.redirect('/dashboard')
class CreateNode(DashHandler):
    """Dashboard form for creating a new Node."""
    @require_admin
    def get(self):
        node = ObjectDict()
        self.render('node.html', node=node)
    @require_admin
    def post(self):
        o = ObjectDict()
        o.title = self.get_argument('title', None)
        o.slug = self.get_argument('slug', None)
        o.avatar = self.get_argument('avatar', None)
        o.description = self.get_argument('description', None)
        o.fgcolor = self.get_argument('fgcolor', None)
        o.bgcolor = self.get_argument('bgcolor', None)
        o.header = self.get_argument('header', None)
        o.sidebar = self.get_argument('sidebar', None)
        o.footer = self.get_argument('footer', None)
        # Catch only conversion failures; the original bare except would
        # also swallow KeyboardInterrupt/SystemExit.
        try:
            o.limit_reputation = int(self.get_argument('reputation', 0))
        except (TypeError, ValueError):
            o.limit_reputation = 0
        try:
            o.limit_role = int(self.get_argument('role', 0))
        except (TypeError, ValueError):
            o.limit_role = 0
        if not (o.slug and o.title and o.description):
            self.create_message('Form Error', 'Please fill the required field')
            self.render('node.html', node=o)
            return
        node = Node(**o)
        self.db.add(node)
        self.db.commit()
        # The cached node list (NodeMixin.get_all_nodes) is stale now.
        self.cache.delete('allnodes')
        self.redirect('/dashboard')
class EditNode(DashHandler, DashMixin):
    """Dashboard form for editing an existing Node, looked up by slug."""
    @require_admin
    def get(self, slug):
        node = Node.query.filter_by(slug=slug).first()
        if not node:
            self.send_error(404)
            return
        self.render('node.html', node=node)
    @require_admin
    def post(self, slug):
        node = self.db.query(Node).filter_by(slug=slug).first()
        if not node:
            self.send_error(404)
            return
        self.update_model(node, 'title', True)
        self.update_model(node, 'slug', True)
        self.update_model(node, 'avatar')
        self.update_model(node, 'description', True)
        self.update_model(node, 'fgcolor')
        self.update_model(node, 'bgcolor')
        self.update_model(node, 'header')
        self.update_model(node, 'sidebar')
        self.update_model(node, 'footer')
        # Catch only conversion failures; the original bare except would
        # also swallow KeyboardInterrupt/SystemExit.
        try:
            node.limit_reputation = int(self.get_argument('reputation', 0))
        except (TypeError, ValueError):
            node.limit_reputation = 0
        try:
            node.limit_role = int(self.get_argument('role', 0))
        except (TypeError, ValueError):
            node.limit_role = 0
        self.db.add(node)
        self.db.commit()
        # Invalidate the cached entry under the *old* slug (the slug may
        # have just changed).
        self.cache.delete('node:%s' % str(slug))
        self.redirect('/node/%s' % node.slug)
class FlushCache(DashHandler):
    """Drop every memcache entry (admin only)."""
    @require_admin
    def get(self):
        self.cache.flush_all()
        self.write('done')
class EditMember(DashHandler, DashMixin):
    """Dashboard editing of a member's account fields."""
    @require_admin
    def get(self, name):
        user = Member.query.filter_by(username=name).first()
        if not user:
            self.send_error(404)
            return
        self.render('member.html', user=user)
    @require_admin
    def post(self, name):
        user = self.db.query(Member).filter_by(username=name).first()
        if not user:
            self.send_error(404)
            return
        self.update_model(user, 'username', True)
        self.update_model(user, 'email', True)
        self.update_model(user, 'role', True)
        self.update_model(user, 'reputation', True)
        self.db.add(user)
        self.db.commit()
        # MemberMixin's @cache('member', ...) stores users under
        # 'member:<id>' and 'member:<name>'; the old code deleted
        # 'user:<id>', which nothing writes, leaving stale cache after
        # an edit.  Invalidate both real keys (by id and by the old
        # username used to look the row up).
        self.cache.delete('member:%s' % str(user.id))
        self.cache.delete('member:%s' % str(name))
        self.redirect('/dashboard')
class EditTopic(DashHandler, TopicMixin):
    """Dashboard form for tweaking a topic's impact score and node."""
    @require_admin
    def get(self, id):
        topic = self.get_topic_by_id(id)
        if not topic:
            self.send_error(404)
            return
        self.render('topic.html', topic=topic)
    @require_admin
    def post(self, id):
        topic = self.db.query(Topic).filter_by(id=id).first()
        if not topic:
            self.send_error(404)
            return
        impact = self.get_argument('impact', None)
        node = self.get_argument('node', None)
        # int(None) raises TypeError and junk input raises ValueError;
        # either way the field is simply left unchanged.  The original
        # bare except would also swallow KeyboardInterrupt/SystemExit.
        try:
            topic.impact = int(impact)
        except (TypeError, ValueError):
            pass
        try:
            topic.node_id = int(node)
        except (TypeError, ValueError):
            pass
        self.db.add(topic)
        self.db.commit()
        self.cache.delete('topic:%s' % topic.id)
        self.redirect('/topic/%d' % topic.id)
class EditReply(DashHandler):
    """Dashboard editing/deletion of a single reply."""
    @require_admin
    def get(self, id):
        reply = self.db.query(Reply).filter_by(id=id).first()
        if not reply:
            self.send_error(404)
            return
        if self.get_argument('delete', 'false') == 'true':
            # NOTE(review): destructive action on a GET (?delete=true);
            # consider moving to POST with CSRF protection.
            topic = self.db.query(Topic).filter_by(id=reply.topic_id).first()
            topic.reply_count -= 1
            self.db.add(topic)
            self.db.delete(reply)
            self.db.commit()
            self.redirect('/dashboard')
            return
        self.render('reply.html', reply=reply)
    @require_admin
    def post(self, id):
        reply = self.db.query(Reply).filter_by(id=id).first()
        if not reply:
            self.send_error(404)
            return
        content = self.get_argument('content', '')
        reply.content = content
        self.db.add(reply)
        self.db.commit()
        self.redirect('/dashboard')
class Dashboard(DashHandler, NodeMixin):
    """Dashboard landing page with member-lookup and cache-purge tools."""
    @require_admin
    def get(self):
        user = self.get_argument('user', None)
        if user:
            # Quick jump to a member's edit page.
            self.redirect('/dashboard/member/%s' % user)
            return
        cache = self.get_argument('cache', None)
        if cache:
            # Delete a single memcache key by name.
            self.cache.delete(str(cache))
            self.redirect('/dashboard')
            return
        nodes = Node.query.all()
        self.render('index.html', nodes=nodes)
# URL routes exported by the dashboard module.
handlers = [
    ('/dashboard', Dashboard),
    ('/dashboard/storage', EditStorage),
    ('/dashboard/node', CreateNode),
    ('/dashboard/node/(\w+)', EditNode),
    ('/dashboard/member/(.*)', EditMember),
    ('/dashboard/topic/(\d+)', EditTopic),
    ('/dashboard/reply/(\d+)', EditReply),
    ('/dashboard/flushcache', FlushCache),
]
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,324 | benmao/june | refs/heads/master | /june/backend/local.py | import os.path
from tornado.options import options
from june.backend import Backend
class LocalBackend(Backend):
    """Store uploaded files on the local filesystem."""
    def save(self, body, filename, callback=None):
        """Write *body* to local_static_path/filename.

        When *callback* is given, it is invoked with the public URL of
        the stored file.
        """
        path = os.path.join(options.local_static_path, filename)
        # Binary mode: bodies are raw image bytes (see UploadHandler),
        # and text mode 'w' would corrupt them on platforms that
        # translate newlines.  The with-block also guarantees the file
        # handle is closed even if the write raises.
        with open(path, 'wb') as f:
            f.write(body)
        if not callback:
            return
        callback(os.path.join(options.local_static_url, filename))
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,325 | benmao/june | refs/heads/master | /june/models/__init__.py | """
Member:
only the basic info of a user
role:
staff > 6
admin > 9
active > 1
not verified email = 1
deactive < 1
reputation:
reputation means the value of a member, it affects in topic and
everything
1. when user's topic is up voted, reputation increase:
+ n1 * log(user.reputation)
2. when user's topic is down voted, reputation decrease:
- n2 * log(user.reputation)
Topic:
topic must be in a node
impact:
for sorting topic
1. when user reply a topic, impact increase:
+ (n1 + day_del * n2) * log(user.reputation)
2. when user up vote a topic, impact increase:
+ n3 * log(user.reputation)
3. when user down vote a topic, impact decrease:
- n4 * log(user.reputation)
"""
import time
import hashlib
from random import choice
from datetime import datetime
from sqlalchemy import Column
from sqlalchemy import Integer, String, DateTime, Text
from tornado.options import options
from june.config import db
def get_current_impact():
    """Default topic impact: the current unix timestamp in whole seconds."""
    now = time.time()
    return int(now)
def create_token(length=16):
    """Return a random alphanumeric token of *length* characters."""
    chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return ''.join(choice(chars) for _ in range(length))
class Member(db.Model):
    """A registered user; see the module docstring for role/reputation
    semantics."""
    username = Column(String(100), unique=True, index=True)
    email = Column(String(200), unique=True, nullable=False, index=True)
    # Stored as "salt$sha1hex" -- see create_password/check_password.
    password = Column(String(100), nullable=False)
    avatar = Column(String(400))
    website = Column(String(400))
    role = Column(Integer, default=2)
    reputation = Column(Integer, default=20, index=True)
    # Random secret embedded in the auth cookie ("<id>/<token>").
    token = Column(String(16))
    created = Column(DateTime, default=datetime.utcnow)
    last_notify = Column(DateTime, default=datetime.utcnow)
    def __init__(self, email, **kwargs):
        # Email is normalized to lowercase; username defaults to the
        # email local part unless supplied explicitly.
        self.email = email.lower()
        self.token = create_token(16)
        if 'username' not in kwargs:
            self.username = self.email.split('@')[0].lower()
        for k, v in kwargs.items():
            setattr(self, k, v)
    def get_avatar(self, size=48):
        """Custom avatar URL if set, else a Gravatar URL of *size* px."""
        if self.avatar:
            return self.avatar
        md5email = hashlib.md5(self.email).hexdigest()
        query = "%s?s=%s%s" % (md5email, size, options.gravatar_extra)
        return options.gravatar_base_url + query
    @staticmethod
    def create_password(raw):
        """Hash *raw* into "salt$sha1(salt + raw + secret)"."""
        salt = create_token(8)
        hsh = hashlib.sha1(salt + raw + options.password_secret).hexdigest()
        return "%s$%s" % (salt, hsh)
    def check_password(self, raw):
        """Check *raw* against the stored salted hash."""
        if '$' not in self.password:
            return False
        salt, hsh = self.password.split('$')
        verify = hashlib.sha1(salt + raw + options.password_secret).hexdigest()
        return verify == hsh
class MemberLog(db.Model):
    """Audit-log entry for a member action (message + originating ip)."""
    user_id = Column(Integer, nullable=False, index=True)
    message = Column(String(100))
    time = Column(DateTime, default=datetime.utcnow)
    ip = Column(String(100))
class Social(db.Model):
    """A member's linked third-party service account."""
    user_id = Column(Integer, nullable=False, index=True)
    # 'y'/'n' flag rather than a boolean column.
    enabled = Column(String(1), default='y')
    service = Column(String(100))  # twitter, douban etc.
    token = Column(Text)  # use json string
class Notify(db.Model):
    """A notification delivered to ``receiver``; see NotifyMixin."""
    sender = Column(Integer, nullable=False)
    receiver = Column(Integer, nullable=False, index=True)
    content = Column(String(400))
    label = Column(String(200))
    link = Column(String(400))
    # 'reply' or 'mention' (set by NotifyMixin).
    type = Column(String(20), default='reply')
    created = Column(DateTime, default=datetime.utcnow)
class Node(db.Model):
    """A forum node/category; limit_* gate who may participate."""
    title = Column(String(200), nullable=False)
    # URL-visible identifier (/node/<slug>).
    slug = Column(String(200), nullable=False, index=True)
    avatar = Column(String(400))
    description = Column(String(1000))
    fgcolor = Column(String(40))
    bgcolor = Column(String(40))
    header = Column(String(2000))
    sidebar = Column(String(2000))
    footer = Column(String(2000))
    created = Column(DateTime, default=datetime.utcnow)
    updated = Column(DateTime, default=datetime.utcnow,
                     onupdate=datetime.utcnow)
    # Minimum reputation / role required to use this node.
    limit_reputation = Column(Integer, default=0)
    limit_role = Column(Integer, default=2)
    topic_count = Column(Integer, default=0)
class FollowNode(db.Model):
    """Join row: user follows node (see NodeMixin.follow_node)."""
    user_id = Column(Integer, nullable=False, index=True)
    node_id = Column(Integer, nullable=False, index=True)
    created = Column(DateTime, default=datetime.utcnow)
class Topic(db.Model):
    """A discussion thread inside a Node; ``impact`` orders listings
    (see the module docstring)."""
    node_id = Column(Integer, nullable=False, index=True)
    user_id = Column(Integer, nullable=False, index=True)
    title = Column(String(200))
    content = Column(Text)
    created = Column(DateTime, default=datetime.utcnow)
    updated = Column(DateTime, default=datetime.utcnow,
                     onupdate=datetime.utcnow, index=True)
    status = Column(String(40))
    hits = Column(Integer, default=1)
    # Comma-separated voter id lists, e.g. "1,2,3,4".
    ups = Column(Text)
    downs = Column(Text)
    impact = Column(Integer, default=get_current_impact, index=True)
    reply_count = Column(Integer, default=0)
    last_reply_by = Column(Integer, default=0)
    last_reply_time = Column(DateTime, default=datetime.utcnow, index=True)
    @property
    def up_users(self):
        """Ids of members who up-voted this topic.

        Returns a list in every case (the old code returned a one-shot
        generator for the non-empty case but a list for the empty one),
        so the value is reusable and consistently typed.
        """
        if not self.ups:
            return []
        return [int(i) for i in self.ups.split(',')]
    @property
    def down_users(self):
        """Ids of members who down-voted this topic (always a list)."""
        if not self.downs:
            return []
        return [int(i) for i in self.downs.split(',')]
class Reply(db.Model):
    """A reply inside a topic thread."""
    topic_id = Column(Integer, nullable=False, index=True)
    user_id = Column(Integer, nullable=False, index=True)
    content = Column(String(2000))
    created = Column(DateTime, default=datetime.utcnow)
    # 'y'/'n' flag: accepted by topic owner.
    accepted = Column(String(1), default='n')
class Storage(db.Model):
    """A key-value storage (site snippets; see StorageMixin)."""
    key = Column(String(100), nullable=False, index=True)
    value = Column(Text)
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,326 | benmao/june | refs/heads/master | /june/lib/decorators.py | import logging
import functools
import urlparse
class require_role(object):
    """Decorator factory: require ``current_user.role >= role``.

    Anonymous users are redirected to the login URL (with a ``next``
    parameter), role==1 users to email verification, role==0 users to
    the guideline page; everyone else below the threshold gets a 403.
    """
    def __init__(self, role):
        self.role = role
    def __call__(self, method):
        @functools.wraps(method)
        def wrapper(handler, *args, **kwargs):
            if not handler.current_user:
                url = handler.get_login_url()
                if '?' not in url:
                    # Preserve the destination so login can redirect
                    # back (mirrors tornado.web.authenticated).
                    if urlparse.urlsplit(url).scheme:
                        # Absolute login URL needs an absolute next URL.
                        next_url = handler.request.full_url()
                    else:
                        next_url = handler.request.uri
                    # NOTE(review): next_url is not URL-escaped here --
                    # confirm that is acceptable.
                    url += '?next=' + next_url
                return handler.redirect(url)
            user = handler.current_user
            if user.role == 1:
                return handler.redirect('/account/verify')
            if user.role == 0:
                return handler.redirect('/doc/guideline')
            if user.role < self.role:
                return handler.send_error(403)
            return method(handler, *args, **kwargs)
        return wrapper
# Shortcuts for the standard role thresholds (2 = verified user,
# 6 = staff, 9 = admin -- see the role notes in june.models).
require_user = require_role(2)
require_staff = require_role(6)
require_admin = require_role(9)
class cache(object):
    """Cache decorator, an easy way to manage cache.

    The result key will be like: prefix:arg1-arg2k1#v1k2#v2.  A ``time``
    of 0 means "no expiry" in memcache terms.
    """
    def __init__(self, prefix, time=0):
        self.prefix = prefix
        self.time = time
    def __call__(self, method):
        @functools.wraps(method)
        def wrapper(handler, *args, **kwargs):
            if not hasattr(handler, 'cache'):
                # fix for UIModule: the real handler owns the cache.
                handler.cache = handler.handler.cache
            if not handler.cache:
                # No cache configured: call straight through.
                return method(handler, *args, **kwargs)
            if args:
                key = self.prefix + ':' + '-'.join(str(a) for a in args)
            else:
                key = self.prefix
            if kwargs:
                # Python 2 idiom (iteritems).  NOTE(review): kwargs
                # iteration order is not guaranteed here, so keys built
                # from multiple kwargs may vary between calls -- verify.
                for k, v in kwargs.iteritems():
                    key += '%s#%s' % (k, v)
            value = handler.cache.get(key)
            if value is None:
                value = method(handler, *args, **kwargs)
                try:
                    handler.cache.set(key, value, self.time)
                except:
                    # Best effort: a failed cache write must not break
                    # the request.
                    logging.warn('cache error: %s' % key)
                    pass
            return value
        return wrapper
def require_system(method):
    """Restrict *method* to requests originating from localhost."""
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if self.request.remote_ip == '127.0.0.1':
            return method(self, *args, **kwargs)
        # Any non-local caller is rejected with a 403.
        self.send_error(403)
    return wrapper
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,327 | benmao/june | refs/heads/master | /june/models/mixin.py | from june.lib.decorators import cache
from june.models import create_token
from june.models import Member, Social, Notify
from june.models import Node, FollowNode
from june.models import Topic
from june.models import Storage
def get_cache_list(handler, model, id_list, key_prefix, time=600):
    """Multi-get *model* rows by id through memcache.

    Returns ``{id: instance}``.  Cache misses are loaded from the
    database and written back for *time* seconds.
    """
    if hasattr(handler, 'cache'):
        cache = handler.cache
    else:
        # UIModule support: the real handler lives at .handler.
        cache = handler.handler.cache
    if not id_list:
        return {}
    # Deduplicate; also makes generator arguments safe.
    id_list = set(id_list)
    data = cache.get_multi(id_list, key_prefix=key_prefix)
    missing = id_list - set(data)
    if missing:
        dct = {}
        # NOTE(review): Django-style ``id__in`` keyword -- assumes the
        # project's custom SQLAlchemy wrapper supports it; verify
        # against june.lib.database.
        for item in model.query.filter_by(id__in=missing).all():
            dct[item.id] = item
        cache.set_multi(dct, time=time, key_prefix=key_prefix)
        data.update(dct)
    return data
class MemberMixin(object):
    """Cached lookups and helpers for Member rows."""
    @cache('member', 600)
    def get_user_by_id(self, id):
        return Member.query.filter_by(id=id).first()
    # NOTE(review): shares the 'member' cache prefix with
    # get_user_by_id; an all-digit username could collide with an id
    # key -- confirm usernames cannot be purely numeric.
    @cache("member", 600)
    def get_user_by_name(self, name):
        return Member.query.filter_by(username=name).first()
    def get_users(self, id_list):
        """Batch form of get_user_by_id; returns {id: Member}."""
        return get_cache_list(self, Member, id_list, 'member:')
    def create_user(self, email):
        """Build (but do not persist) a Member whose username is derived
        from the email's local part, uniquified when taken."""
        username = email.split('@')[0].lower()
        username = username.replace('.', '').replace('-', '')
        member = self.get_user_by_name(username)
        if member:
            # Name taken: append a short random suffix.
            username = username + create_token(5)
        user = Member(email, username=username)
        return user
    @cache('social', 6000)
    def get_user_social(self, user_id):
        """Map of service name -> {token, enabled} for a user."""
        dct = {}
        for net in Social.query.filter_by(user_id=user_id):
            dct[net.service] = {'token': net.token, 'enabled': net.enabled}
        return dct
class NotifyMixin(object):
    """Creates notification rows and invalidates the notify cache."""
    def create_notify(self, receiver, title, content, link, type):
        """Stage a Notify row (content truncated to 100 chars); the
        caller is responsible for committing."""
        notify = Notify(sender=self.current_user.id, receiver=receiver,
                        label=title, link=link, content=content[:100],
                        type=type)
        self.db.add(notify)
        if not hasattr(self, 'cache'):
            # UIModule support: borrow the handler's cache client.
            self.cache = self.handler.cache
        self.cache.delete('notify:%s' % receiver)
        return notify
    def create_reply_notify(self, receiver, topic, content):
        # Don't notify yourself about your own reply.
        if receiver == self.current_user.id:
            return
        link = '/topic/%s#reply-%s' % (topic.id, topic.reply_count)
        title = topic.title
        return self.create_notify(receiver, title, content, link, 'reply')
    def create_mention(self, username, topic, content):
        """Notify @username, skipping self-mentions and the topic owner
        (who already receives a reply notification)."""
        user = self.cache.get('member:%s' % str(username))
        if user is None:
            user = Member.query.filter_by(username=username).first()
            self.cache.set('member:%s' % str(username), user, 600)
        if not user:
            return
        if user.id == self.current_user.id:
            return
        if user.id == topic.user_id:
            # Avoid a double notify: the owner already got the reply one.
            return
        link = '/topic/%s#reply-%s' % (topic.id, topic.reply_count)
        title = topic.title
        return self.create_notify(user.id, title, content, link, 'mention')
class NodeMixin(object):
    """Cached lookups and follow helpers for Node rows."""
    @cache('node', 600)
    def get_node_by_id(self, id):
        return Node.query.filter_by(id=id).first()
    @cache('node', 600)
    def get_node_by_slug(self, slug):
        return Node.query.filter_by(slug=slug).first()
    @cache('allnodes', 600)
    def get_all_nodes(self):
        """All nodes, most recently updated first."""
        nodes = Node.query.all()
        nodes = sorted(nodes, key=lambda o: o.updated, reverse=True)
        return nodes
    def get_nodes(self, id_list):
        """Batch lookup; returns {id: Node}."""
        return get_cache_list(self, Node, id_list, 'node:')
    def follow_node(self, node_id):
        """Follow *node_id* for the current user.

        Returns the node id (0 when not logged in).  The FollowNode row
        is only staged; the caller must commit.
        """
        if not self.current_user:
            return 0
        nodes = self.get_user_follow_nodes(self.current_user.id)
        if node_id in nodes:
            # Already following -- nothing to do.
            return node_id
        nodes.append(node_id)
        user = self.current_user
        # Refresh the cached follow list eagerly.
        self.cache.set('follownode:%s' % user.id, nodes, 6000)
        f = FollowNode(user_id=user.id, node_id=node_id)
        self.db.add(f)
        return node_id
    @cache('follownode', 6000)
    def get_user_follow_nodes(self, user_id):
        """List of node ids the user follows."""
        q = FollowNode.query.filter_by(user_id=user_id).values('node_id')
        nodes = []
        for values in q:
            nodes.append(values[0])
        return nodes
    def is_user_follow_node(self, user_id, node_id):
        # should query again ?  (cached list may be up to 6000s stale)
        return node_id in self.get_user_follow_nodes(user_id)
class TopicMixin(object):
    """Cached topic lookup."""
    @cache('topic', 600)
    def get_topic_by_id(self, id):
        return Topic.query.filter_by(id=id).first()
class StorageMixin(object):
    """Site key/value snippets (header, sidebar, footer, ...)."""
    @cache('storage', 0)
    def get_storage(self, key):
        """Value for *key*; cached without expiry until set_storage
        invalidates it."""
        data = Storage.query.filter_by(key=key).first()
        if data:
            return data.value
        return None
    def set_storage(self, key, value):
        """Upsert *key* and drop its cache entry; caller must commit."""
        data = self.db.query(Storage).filter_by(key=key).first()
        if not data:
            data = Storage(key=key, value=value)
        else:
            data.value = value
        self.cache.delete('storage:%s' % key)
        self.db.add(data)
        return data
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,328 | benmao/june | refs/heads/master | /june/lib/util.py | import functools
from tornado import ioloop, stack_context
from tornado.options import define, options
def parse_config_file(path):
    """Execute a Python config file and apply it to tornado options.

    Names already define()d are overwritten; unknown names are newly
    defined.  NOTE(review): the file is exec'd, so it must come from a
    trusted source (deliberate here -- it is the server's own config).
    """
    config = {}
    exec(compile(open(path).read(), path, 'exec'), config, config)
    for name in config:
        if name in options:
            options[name].set(config[name])
        else:
            define(name, config[name])
class ObjectDict(dict):
    """Dict whose keys are also readable/writable as attributes.

    Missing attributes read as None instead of raising AttributeError.
    """
    def __getattr__(self, key):
        return self.get(key)
    def __setattr__(self, key, value):
        self[key] = value
def delay_call(func, *arg, **kwargs):
    """Schedule ``func(*arg, **kwargs)`` on the IOLoop's next iteration,
    outside of any active stack context."""
    deferred = functools.partial(func, *arg, **kwargs)
    with stack_context.NullContext():
        ioloop.IOLoop.instance().add_callback(deferred)
class PageMixin(object):
    """Pagination helpers shared by handlers and UI modules.

    Reads the query arguments ``o`` (ordering) and ``p`` (page) through
    ``self.get_argument``; when the mixin is applied to a UI module rather
    than a handler, ``get_argument`` is borrowed from ``self.handler``.
    """

    def _get_order(self):
        """Return the sort key: '-id' when o=1, otherwise '-impact'."""
        if not hasattr(self, 'get_argument'):
            self.get_argument = self.handler.get_argument
        order = self.get_argument('o', '0')
        if order == '1':
            return '-id'
        return '-impact'

    def _get_page(self):
        """Return the requested page number, defaulting to 1 on bad input."""
        if not hasattr(self, 'get_argument'):
            self.get_argument = self.handler.get_argument
        page = self.get_argument('p', '1')
        try:
            return int(page)
        except (TypeError, ValueError):
            # Narrowed from a bare ``except:``: only conversion failures
            # should silently fall back to page one.
            return 1

    def _get_pagination(self, q, count=None, perpage=20):
        """Slice ``q`` for the current page and build pagination metadata.

        :param q: sliceable query or sequence, already ordered.
        :param count: total item count; computed via ``q.count()`` if falsy.
        :param perpage: items per page.
        :return: dict with ``datalist``, ``page_number``, ``pagelist``,
                 ``current_page`` and ``item_number``.
        """
        page = self._get_page()
        start = (page - 1) * perpage
        end = page * perpage
        if not count:
            count = q.count()
        dct = {}
        # Floor division keeps this an int on Python 3; plain ``/`` would
        # yield a float here and break the range() calls below.
        page_number = (count - 1) // perpage + 1
        dct['page_number'] = page_number
        dct['datalist'] = q[start:end]
        # Show a window of up to 9 page links centred near the current page.
        if page < 5:
            dct['pagelist'] = range(1, min(page_number, 9) + 1)
        elif page + 4 > page_number:
            dct['pagelist'] = range(max(page_number - 8, 1), page_number + 1)
        else:
            dct['pagelist'] = range(page - 4, min(page_number, page + 4) + 1)
        dct['current_page'] = page
        dct['item_number'] = count
        return dct
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,329 | benmao/june | refs/heads/master | /june/backend/__init__.py |
class Backend(object):
    """Abstract interface for file-storage backends."""

    @classmethod
    def save(cls, body, filename, callback=None):
        """Persist ``body`` under ``filename``.

        Concrete backends must override this; ``callback`` (if given)
        receives the resulting public URL or None on failure.
        """
        # Bug fix: ``raise NotImplemented`` raised a TypeError because
        # NotImplemented is a value, not an exception class.
        raise NotImplementedError
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,330 | benmao/june | refs/heads/master | /june/urls.py | from june.handlers import front
from june.handlers import node
from june.handlers import topic
from june.handlers import account
from june.handlers import api
from june import social
from june.dashboard import handlers as dashboard
# Aggregate URL handlers from every sub-application.  Registration order
# matters: Tornado matches routes first-to-last, so ``front`` stays last.
handlers = []
for _app in (account, dashboard, topic, node, api, social, front):
    handlers.extend(_app.handlers)

# No virtual-host specific handlers yet.
sub_handlers = []

# Merge the UI modules exported by the apps that define them.
ui_modules = {}
for _app in (topic, node, front, account):
    ui_modules.update(_app.ui_modules)
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,331 | benmao/june | refs/heads/master | /june/lib/recaptcha.py | import logging
import urllib
from tornado.auth import httpclient
from tornado.options import options
class RecaptchaMixin(object):
    """Handler mixin that renders and verifies Google reCAPTCHA widgets.

    Relies on the tornado options ``recaptcha_key``, ``recaptcha_secret``
    and ``recaptcha_theme`` being defined, and on the host class providing
    ``get_argument``, ``async_callback`` and ``self.request`` (i.e. a
    tornado RequestHandler).
    """
    RECAPTCHA_VERIFY_URL = "http://www.google.com/recaptcha/api/verify"

    def recaptcha_render(self):
        """Return the HTML/JS snippet that embeds the captcha widget."""
        token = self._recaptcha_token()
        html = ('<div id="recaptcha_div"></div><script type="text/javascript" '
                'src="http://www.google.com/recaptcha/api/js/recaptcha_ajax.js'
                '"></script><script type="text/javascript">Recaptcha.create'
                '("%(key)s", "recaptcha_div", {theme: "%(theme)s",callback:'
                'Recaptcha.focus_response_field});</script>')
        return html % token

    def recaptcha_validate(self, callback):
        """Asynchronously verify the submitted captcha answer.

        ``callback`` receives the verifier's response body on success,
        or None on failure.
        """
        token = self._recaptcha_token()
        challenge = self.get_argument('recaptcha_challenge_field', None)
        response = self.get_argument('recaptcha_response_field', None)
        # Bind the user callback into the HTTP response handler so errors
        # propagate through the handler's async machinery.
        callback = self.async_callback(self._on_recaptcha_request, callback)
        http = httpclient.AsyncHTTPClient()
        post_args = {
            'privatekey': token['secret'],
            'remoteip': self.request.remote_ip,
            'challenge': challenge,
            'response': response
        }
        http.fetch(self.RECAPTCHA_VERIFY_URL, method="POST",
                   body=urllib.urlencode(post_args), callback=callback)

    def _on_recaptcha_request(self, callback, response):
        # The verify endpoint answers with two whitespace-separated fields:
        # "true"/"false" and a message.
        if response.error:
            logging.warning("Error response %s fetching %s", response.error,
                            response.request.url)
            callback(None)
            return
        verify, message = response.body.split()
        if verify == 'true':
            callback(response.body)
        else:
            logging.warning("Recaptcha verify failed %s", message)
            callback(None)

    def _recaptcha_token(self):
        """Return the key/secret/theme mapping read from tornado options."""
        token = dict(
            key=options.recaptcha_key,
            secret=options.recaptcha_secret,
            theme=options.recaptcha_theme,
        )
        return token
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,332 | benmao/june | refs/heads/master | /june/backend/upyun.py | import base64
import functools
from tornado import httpclient
from tornado.options import options
from june.backend import Backend
class Upyun(object):
    """Minimal asynchronous client for the Upyun REST storage API."""

    def __init__(self, bucket_with_dir, username, password, static_url=None):
        """
        :param bucket_with_dir: bucket name, optionally followed by a
            sub-directory, e.g. ``"mybucket/uploads"``.
        :param username: Upyun operator username.
        :param password: Upyun operator password.
        :param static_url: optional public base URL overriding the default
            ``<bucket>.b0.upaiyun.com`` address.
        """
        parts = bucket_with_dir.split('/')
        bucket = parts[0]
        self.url = 'http://v0.api.upyun.com/' + bucket_with_dir + '/'
        self.username = username
        self.password = password
        self.static_url = 'http://%s.b0.upaiyun.com/%s/' \
            % (bucket, '/'.join(parts[1:]))
        if static_url:
            # Bug fix: this previously read ``self.static_url =
            # self.static_url``, silently discarding the caller-supplied
            # override.
            self.static_url = static_url

    def basic_auth_header(self):
        """Return an HTTP Basic ``Authorization`` header dict."""
        auth = base64.b64encode('%s:%s' % (self.username, self.password))
        headers = {'Authorization': 'Basic %s' % auth}
        return headers

    def signature_auth_header(self):
        #TODO: implement Upyun signature-based authentication
        pass

    def get_usage(self, callback=None):
        """Asynchronously fetch the bucket's storage usage report."""
        url = self.url + '?usage'
        http = httpclient.AsyncHTTPClient()
        http.fetch(url, method='GET', headers=self.basic_auth_header(),
                   callback=callback)
        return

    def upload(self, body, filename, callback=None):
        """PUT ``body`` as ``filename``.

        ``callback`` (if given) receives the public URL on success or
        None on failure.
        """
        url = self.url + filename
        http = httpclient.AsyncHTTPClient()
        http.fetch(
            url, method='PUT', headers=self.basic_auth_header(), body=body,
            callback=functools.partial(self._on_upload, callback, filename))
        return

    def _on_upload(self, callback, filename, response):
        # Translate the raw HTTP response into a public URL (or None).
        if not callback:
            return
        if response.error:
            callback(None)
            return
        callback(self.static_url + filename)
        return
class UpyunBackend(Backend):
    """Storage backend that uploads files to Upyun."""

    def save(self, body, filename, callback):
        """Upload ``body`` under ``filename``; ``callback`` receives the
        public URL (or None on failure)."""
        client = Upyun(options.upyun_bucket, options.upyun_username,
                       options.upyun_password)
        # An explicitly configured ``upyun_static_url`` option overrides
        # the default public address.
        if hasattr(options, 'upyun_static_url'):
            client.static_url = options.upyun_static_url
        client.upload(body, filename, callback)
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,333 | benmao/june | refs/heads/master | /setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Runtime dependencies of the june forum application.
REQUIREMENTS = [
    'python-memcached',
    'markdown',
    'pygments',
    'tornado',
    'SQLAlchemy',
]

# Package metadata and console entry points.
setup(
    name='june',
    version='1.0',
    author='Hsiaoming Young',
    author_email='lepture@me.com',
    url='http://lepture.com/project/june',
    description='June: a forum',
    license='BSD License',
    packages=find_packages(),
    include_package_data=True,
    install_requires=REQUIREMENTS,
    entry_points={
        'console_scripts': [
            'june.server= june.app:run_server',
            'june.tools= june.tools:main',
        ],
    },
)
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,334 | benmao/june | refs/heads/master | /june/handlers/api.py | import math
from tornado.options import options
from june.lib.handler import BaseHandler
from june.lib.decorators import require_user
from june.models import Topic, Member, Reply
from june.models.mixin import NotifyMixin
class UpTopicHandler(BaseHandler):
    """Up a topic will increase impact of the topic,
    and increase reputation of the creator
    """
    @require_user
    def post(self, id):
        # Look up the topic; unknown ids are a 404.
        topic = self.db.query(Topic).filter_by(id=id).first()
        if not topic:
            self.send_error(404)
            return
        user_id = self.current_user.id
        if topic.user_id == user_id:
            # you can't vote your own topic
            dct = {'status': 'fail', 'msg': 'cannot up vote your own topic'}
            self.write(dct)
            return
        if user_id in topic.down_users:
            # you can't up and down vote at the same time
            dct = {'status': 'fail', 'msg': 'cannot up vote your down topic'}
            self.write(dct)
            return
        creator = self.db.query(Member).filter_by(id=topic.user_id).first()
        up_users = list(topic.up_users)
        if user_id in up_users:
            # Voting again toggles: withdraw the vote and roll back the
            # impact/reputation the original vote granted.
            up_users.remove(user_id)
            topic.ups = ','.join(str(i) for i in up_users)
            topic.impact -= self._calc_topic_impact()
            creator.reputation -= self._calc_user_impact()
            self.db.add(creator)
            self.db.add(topic)
            self.db.commit()
            dct = {'status': 'ok'}
            dct['data'] = {'action': 'cancel', 'count': len(up_users)}
            self.write(dct)
            # Invalidate the cached topic page after the response.
            self.cache.delete('topic:%s' % str(id))
            return
        # First-time vote: record it and award impact/reputation.
        up_users.append(user_id)
        topic.ups = ','.join(str(i) for i in up_users)
        topic.impact += self._calc_topic_impact()
        creator.reputation += self._calc_user_impact()
        self.db.add(topic)
        self.db.add(creator)
        self.db.commit()
        dct = {'status': 'ok'}
        dct['data'] = {'action': 'active', 'count': len(up_users)}
        self.write(dct)
        self.cache.delete('topic:%s' % str(id))
        return

    def _calc_topic_impact(self):
        # Voters with reputation below 2 carry no weight (log(1) == 0).
        if self.current_user.reputation < 2:
            return 0
        factor = int(options.up_factor_for_topic)
        return factor * int(math.log(self.current_user.reputation))

    def _calc_user_impact(self):
        if self.current_user.reputation < 2:
            return 0
        factor = int(options.up_factor_for_user)
        impact = factor * int(math.log(self.current_user.reputation))
        # Cap the reputation change a single vote can grant.
        return min(impact, int(options.up_max_for_user))
class DownTopicHandler(BaseHandler):
    """Down a topic will reduce impact of the topic,
    and decrease reputation of the creator
    """
    @require_user
    def post(self, id):
        # Look up the topic; unknown ids are a 404.
        topic = self.db.query(Topic).filter_by(id=id).first()
        if not topic:
            self.send_error(404)
            return
        user_id = self.current_user.id
        if topic.user_id == user_id:
            # you can't vote your own topic
            dct = {'status': 'fail', 'msg': "cannot down vote your own topic"}
            self.write(dct)
            return
        if user_id in topic.up_users:
            # you can't down and up vote at the same time
            dct = {'status': 'fail', 'msg': "cannot down vote your up topic"}
            self.write(dct)
            return
        creator = self.db.query(Member).filter_by(id=topic.user_id).first()
        down_users = list(topic.down_users)
        if user_id in down_users:
            #TODO: can you cancel a down vote ?
            # Toggling: withdraw the down vote and restore the impact and
            # reputation it removed.
            down_users.remove(user_id)
            topic.downs = ','.join(str(i) for i in down_users)
            topic.impact += self._calc_topic_impact()
            creator.reputation += self._calc_user_impact()
            self.db.add(creator)
            self.db.add(topic)
            self.db.commit()
            dct = {'status': 'ok'}
            dct['data'] = {'action': 'cancel', 'count': len(down_users)}
            self.write(dct)
            # Invalidate the cached topic page.
            self.cache.delete('topic:%s' % str(id))
            return
        # First down vote: record it and deduct impact/reputation.
        down_users.append(user_id)
        topic.downs = ','.join(str(i) for i in down_users)
        topic.impact -= self._calc_topic_impact()
        creator.reputation -= self._calc_user_impact()
        self.db.add(creator)
        self.db.add(topic)
        self.db.commit()
        dct = {'status': 'ok'}
        dct['data'] = {'action': 'active', 'count': len(down_users)}
        self.write(dct)
        self.cache.delete('topic:%s' % str(id))
        return

    def _calc_topic_impact(self):
        # Voters with reputation below 2 carry no weight.
        if self.current_user.reputation < 2:
            return 0
        factor = int(options.down_factor_for_topic)
        return factor * int(math.log(self.current_user.reputation))

    def _calc_user_impact(self):
        if self.current_user.reputation < 2:
            return 0
        factor = int(options.down_factor_for_user)
        impact = factor * int(math.log(self.current_user.reputation))
        # Cap the reputation change from a single vote.
        return min(impact, int(options.down_max_for_user))
class AcceptReplyHandler(BaseHandler, NotifyMixin):
    """Vote for a reply will affect the topic impact and reply user's
    reputation
    """
    def _is_exist(self, topic_id, reply_id):
        # Return (reply, topic) when both exist and the reply belongs to
        # the topic; False otherwise.
        reply = self.db.query(Reply).filter_by(id=reply_id).first()
        if not reply or reply.topic_id != int(topic_id):
            return False
        topic = self.db.query(Topic).filter_by(id=topic_id).first()
        if not topic:
            return False
        return reply, topic

    def _calc_user_impact(self):
        # Reputation awarded to the reply author; zero for low-reputation
        # accepters, capped by the vote_max_for_user option.
        if self.current_user.reputation < 2:
            return 0
        factor = int(options.accept_reply_factor_for_user)
        impact = factor * int(math.log(self.current_user.reputation))
        return min(impact, int(options.vote_max_for_user))

    def post(self, topic_id, reply_id):
        reply_topic = self._is_exist(topic_id, reply_id)
        if not reply_topic:
            self.send_error(404)
            return
        reply, topic = reply_topic
        user_id = self.current_user.id
        # Only the topic owner may accept, and not their own reply.
        if user_id != topic.user_id:
            dct = {'status': 'fail', 'msg': 'you are not topic owner'}
            self.write(dct)
            return
        if user_id == reply.user_id:
            dct = {'status': 'fail', 'msg': 'cannot accept your own reply'}
            self.write(dct)
            return
        creator = self.db.query(Member).filter_by(id=reply.user_id).first()
        if reply.accepted == 'y':
            # Toggling off: withdraw the accept and its reputation award.
            creator.reputation -= self._calc_user_impact()
            reply.accepted = 'n'
            self.db.add(creator)
            self.db.add(reply)
            self.db.commit()
            self.cache.delete('ReplyListModule:%s:1' % topic.id)
            dct = {'status': 'ok', 'data': 'cancel'}
            self.write(dct)
            return
        # Accepting: reward the reply author, charge the topic owner one
        # reputation point, and notify the author.
        creator.reputation += self._calc_user_impact()
        topic_owner = self.db.query(Member).filter_by(id=topic.user_id).first()
        topic_owner.reputation -= 1
        reply.accepted = 'y'
        self.db.add(reply)
        self.db.add(creator)
        self.db.add(topic_owner)
        link = '/topic/%s' % topic.id
        self.create_notify(reply.user_id, topic.title, reply.content,
                           link, 'accept')
        self.db.commit()
        # Drop the cached first page of the reply list for this topic.
        self.cache.delete('ReplyListModule:%s:1' % topic.id)
        dct = {'status': 'ok', 'data': 'active'}
        self.write(dct)
        return
# Route table for the voting API; the \d+ groups capture topic and reply
# ids.  Raw strings keep the regexes identical while avoiding invalid
# escape warnings on modern Python.
handlers = [
    (r'/api/topic/(\d+)/up', UpTopicHandler),
    (r'/api/topic/(\d+)/down', DownTopicHandler),
    (r'/api/topic/(\d+)/(\d+)/accept', AcceptReplyHandler),
]
| {"/june/app.py": ["/june/lib/util.py", "/june/config.py", "/june/urls.py"], "/june/handlers/node.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/handlers/account.py": ["/june/lib/handler.py", "/june/lib/util.py", "/june/lib/recaptcha.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/lib/handler.py": ["/june/models/mixin.py", "/june/lib/util.py"], "/june/handlers/front.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/dashboard/handlers.py": ["/june/lib/util.py", "/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"], "/june/backend/local.py": ["/june/backend/__init__.py"], "/june/models/__init__.py": ["/june/config.py"], "/june/models/mixin.py": ["/june/lib/decorators.py", "/june/models/__init__.py"], "/june/backend/upyun.py": ["/june/backend/__init__.py"], "/june/handlers/api.py": ["/june/lib/handler.py", "/june/lib/decorators.py", "/june/models/__init__.py", "/june/models/mixin.py"]} |
67,342 | Oopps/EMarket | refs/heads/main | /store_app/admin.py | from django.contrib import admin
# Register your models here.
from store_app.models import Product, ProductFeature, CustomFeature, ProductType
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Default admin interface for Product (no customisation)."""
    pass
@admin.register(ProductFeature)
class ProductFeatureAdmin(admin.ModelAdmin):
    """Default admin interface for ProductFeature (no customisation)."""
    pass
@admin.register(CustomFeature)
class CustomFeatureAdmin(admin.ModelAdmin):
    """Default admin interface for CustomFeature (no customisation)."""
    pass
@admin.register(ProductType)
class ProductTypeAdmin(admin.ModelAdmin):
    """Default admin interface for ProductType (no customisation)."""
    pass
| {"/store_app/admin.py": ["/store_app/models.py"], "/store_app/views.py": ["/store_app/serializers.py", "/store_app/models.py"], "/store_app/serializers.py": ["/store_app/models.py"], "/store_app/urls.py": ["/store_app/views.py"]} |
67,343 | Oopps/EMarket | refs/heads/main | /store_app/models.py | from django.db import models
# Create your models here.
class ProductType(models.Model):
    """Named category assigned to products via ``Product.p_type``."""
    name = models.CharField(max_length=64)

    def __str__(self):
        return self.name
class Product(models.Model):
    """A sellable item with price, weight and an optional type."""
    name = models.CharField(max_length=64)
    description = models.TextField(max_length=512)
    price = models.FloatField()
    weight = models.FloatField()
    # SET_NULL: deleting a ProductType leaves its products with p_type=None.
    p_type = models.ForeignKey(ProductType, on_delete=models.SET_NULL, null=True)

    def __str__(self):
        return self.name
class CustomFeature(models.Model):
    """A named feature (attribute key) attachable to products through
    ProductFeature."""
    name = models.CharField(max_length=32)

    def __str__(self):
        return self.name
class ProductFeature(models.Model):
    """Join table assigning a CustomFeature with a concrete value to a
    Product."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    feature = models.ForeignKey(CustomFeature, on_delete=models.CASCADE)
    value = models.CharField(max_length=64, default='')

    def __str__(self):
        return str(self.product) + ' - ' + str(self.feature) + ': ' + self.value
| {"/store_app/admin.py": ["/store_app/models.py"], "/store_app/views.py": ["/store_app/serializers.py", "/store_app/models.py"], "/store_app/serializers.py": ["/store_app/models.py"], "/store_app/urls.py": ["/store_app/views.py"]} |
67,344 | Oopps/EMarket | refs/heads/main | /store_app/migrations/0003_auto_20201107_1643.py | # Generated by Django 3.1.3 on 2020-11-07 16:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: re-declare ProductFeature.value with an
    empty-string default (previously default=None)."""

    dependencies = [
        ('store_app', '0002_auto_20201107_1642'),
    ]

    operations = [
        migrations.AlterField(
            model_name='productfeature',
            name='value',
            field=models.CharField(default='', max_length=64),
        ),
    ]
| {"/store_app/admin.py": ["/store_app/models.py"], "/store_app/views.py": ["/store_app/serializers.py", "/store_app/models.py"], "/store_app/serializers.py": ["/store_app/models.py"], "/store_app/urls.py": ["/store_app/views.py"]} |
67,345 | Oopps/EMarket | refs/heads/main | /store_app/migrations/0004_auto_20201107_1656.py | # Generated by Django 3.1.3 on 2020-11-07 16:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: create the ProductType model and add the nullable
    ``p_type`` foreign key (SET_NULL) to Product."""

    dependencies = [
        ('store_app', '0003_auto_20201107_1643'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProductType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
            ],
        ),
        migrations.AddField(
            model_name='product',
            name='p_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='store_app.producttype'),
        ),
    ]
| {"/store_app/admin.py": ["/store_app/models.py"], "/store_app/views.py": ["/store_app/serializers.py", "/store_app/models.py"], "/store_app/serializers.py": ["/store_app/models.py"], "/store_app/urls.py": ["/store_app/views.py"]} |
67,346 | Oopps/EMarket | refs/heads/main | /store_app/views.py | from django.shortcuts import render
# Create your views here.
from rest_framework.response import Response
from rest_framework.views import APIView
from store_app.serializers import ProductSerializer
from store_app.models import Product, ProductFeature, CustomFeature, ProductType
class ProductView(APIView):
    """List products (optionally filtered by type name) and create new
    ones."""

    def get(self, request):
        """Return all products, or only those whose type matches the
        ``type`` query parameter when it is supplied."""
        queryset = Product.objects.all()
        r_type = request.query_params.get('type', None)
        if r_type is not None:
            queryset = queryset.filter(p_type__name=r_type)
        return Response(ProductSerializer(queryset, many=True).data)

    def post(self, request):
        """Validate the payload, create a Product and echo it back."""
        serializer = ProductSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        instance = serializer.save()
        return Response(ProductSerializer(instance).data)
67,347 | Oopps/EMarket | refs/heads/main | /store_app/serializers.py | from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from store_app.models import Product, ProductFeature, CustomFeature, ProductType
class ProductSerializer(serializers.Serializer):
    """Hand-written serializer for Product.

    ``p_type`` is read-only and rendered via the related object's str();
    it cannot be set through this serializer.
    """
    id = serializers.IntegerField(read_only=True)
    name = serializers.CharField(max_length=64)
    description = serializers.CharField(max_length=512)
    price = serializers.FloatField()
    weight = serializers.FloatField()
    p_type = serializers.StringRelatedField(read_only=True)

    def create(self, validated_data):
        """Insert and return a new Product row."""
        return Product.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Apply changes via a bulk UPDATE, then re-fetch the row so the
        returned instance reflects the stored values."""
        Product.objects.filter(pk=instance.id).update(**validated_data)
        return Product.objects.get(pk=instance.pk)
67,348 | Oopps/EMarket | refs/heads/main | /store_app/migrations/0001_initial.py | # Generated by Django 3.1.3 on 2020-11-07 16:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema: CustomFeature (then still holding
    ``value``), Product, and the ProductFeature join table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CustomFeature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
                ('value', models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('description', models.TextField(max_length=512)),
                ('price', models.FloatField()),
                ('weight', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='ProductFeature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('feature', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store_app.customfeature')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store_app.product')),
            ],
        ),
    ]
| {"/store_app/admin.py": ["/store_app/models.py"], "/store_app/views.py": ["/store_app/serializers.py", "/store_app/models.py"], "/store_app/serializers.py": ["/store_app/models.py"], "/store_app/urls.py": ["/store_app/views.py"]} |
67,349 | Oopps/EMarket | refs/heads/main | /store_app/migrations/0002_auto_20201107_1642.py | # Generated by Django 3.1.3 on 2020-11-07 16:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: move the ``value`` column from CustomFeature onto
    ProductFeature."""

    dependencies = [
        ('store_app', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='customfeature',
            name='value',
        ),
        migrations.AddField(
            model_name='productfeature',
            name='value',
            field=models.CharField(default=None, max_length=64),
            preserve_default=False,
        ),
    ]
| {"/store_app/admin.py": ["/store_app/models.py"], "/store_app/views.py": ["/store_app/serializers.py", "/store_app/models.py"], "/store_app/serializers.py": ["/store_app/models.py"], "/store_app/urls.py": ["/store_app/views.py"]} |
67,350 | Oopps/EMarket | refs/heads/main | /store_app/urls.py | from django.conf.urls import url
from store_app.views import ProductView
# NOTE(review): the pattern is unanchored (no '^'), so it matches
# 'product/' appearing anywhere in the requested path — confirm this is
# intentional.
urlpatterns = [
    url(r'product/', ProductView.as_view())
]
67,356 | Levstyle/film-crawler | refs/heads/master | /spider.py | import requests
from datetime import datetime
import asyncio
class Spider:
    """Base class for the film-site crawlers.

    Subclasses pass their listing ``url`` to ``__init__`` and implement
    :meth:`parser`, returning a list of ``dict(date=..., url=...,
    title=...)`` entries no older than ``keep_days`` days.
    """

    def __init__(self, url, keep_days=7):
        self.url = url
        self.keep_days = keep_days
        # Browser-like headers to avoid trivial bot blocking.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0",
            "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
            "Accept-Encoding": "gzip, deflate",
            "Connection": "keep-alive"
        }
        self.requests = requests.Session()
        self.requests.headers.update(self.headers)
        self.now = datetime.now()

    def crawl(self, url):
        """Fetch ``url`` and return its decoded body text."""
        response = self.requests.get(url=url)
        # Trust content-sniffed charset detection over the (often missing
        # or wrong) response header.
        response.encoding = response.apparent_encoding
        return response.text

    def parser(self, url):
        """Parse page text into film dicts; overridden by subclasses."""
        pass

    async def __call__(self):
        """Crawl and parse this spider's URL, returning the film list.

        Fix: ``@asyncio.coroutine`` was removed in Python 3.11; a native
        ``async def`` is the drop-in replacement for callers that wrap
        this in ``asyncio.ensure_future``.  Note the HTTP request itself
        is still blocking, so concurrent tasks do not actually overlap.
        """
        result = self.parser(self.crawl(self.url))
        print("{} is done!".format(self.url))
        return result
| {"/seehd.py": ["/spider.py"], "/main.py": ["/seehd.py", "/dytt8.py", "/piaohua.py"], "/dytt8.py": ["/spider.py"], "/piaohua.py": ["/spider.py"]} |
67,357 | Levstyle/film-crawler | refs/heads/master | /seehd.py | from bs4 import BeautifulSoup
from spider import Spider
from datetime import datetime
class SeeHD(Spider):
    """Crawler for the www.seehd.so forum listing."""

    def __init__(self):
        super().__init__(url="http://www.seehd.so/thread-2")

    def parser(self, text):
        """Extract recent film posts from a thread-list page."""
        soup = BeautifulSoup(text, 'html.parser')
        films = []
        for cell in soup.select('td.subject'):
            # Cells carrying this icon are skipped.
            if cell.select('i.icon_headtopic_3'):
                continue
            posted = cell.select('p.info span')[0].text
            link = cell.select('p.title a')[-1]
            entry = dict(date=posted, url=link.attrs['href'], title=link.text)
            age = (self.now - datetime.strptime(posted, "%Y-%m-%d")).days
            if age <= self.keep_days:
                films.append(entry)
        return films
if __name__ == "__main__":
    # Manual smoke test.  Note: __call__ is a coroutine, so this call only
    # creates the coroutine object without awaiting it — the crawl does
    # not actually run here.
    SeeHD()()
| {"/seehd.py": ["/spider.py"], "/main.py": ["/seehd.py", "/dytt8.py", "/piaohua.py"], "/dytt8.py": ["/spider.py"], "/piaohua.py": ["/spider.py"]} |
67,358 | Levstyle/film-crawler | refs/heads/master | /main.py | from seehd import SeeHD
from dytt8 import Dytt8
from piaohua import PiaoHua
from prettytable import PrettyTable
from functools import reduce
import asyncio
def main():
    """Crawl every film source concurrently and print a merged table,
    sorted by date with the newest entries first."""
    movies = PrettyTable(["电影名", "地址", "时间"])
    # Left-align the name and URL columns; centre the date column.
    movies.align["电影名"] = "l"
    movies.align["地址"] = "l"
    movies.align["时间"] = "c"
    # Schedule all three crawlers; they run in one event loop below.
    tasks = [
        asyncio.ensure_future(SeeHD()()),
        asyncio.ensure_future(Dytt8()()),
        asyncio.ensure_future(PiaoHua()())
    ]
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.wait(tasks))
    # Flatten the per-site result lists into one list of film dicts.
    films = reduce(lambda x, y: x + y, [task.result() for task in tasks])
    for movie in films:
        movies.add_row([movie['title'], movie['url'], movie['date']])
    # "时间" (the date column) is the sort key.
    print(movies.get_string(sortby='时间', reversesort=True))
    loop.close()
if __name__ == '__main__':
    # Script entry point.
    main()
| {"/seehd.py": ["/spider.py"], "/main.py": ["/seehd.py", "/dytt8.py", "/piaohua.py"], "/dytt8.py": ["/spider.py"], "/piaohua.py": ["/spider.py"]} |
67,359 | Levstyle/film-crawler | refs/heads/master | /dytt8.py | from bs4 import BeautifulSoup
from spider import Spider
from datetime import datetime
from urllib.parse import urljoin
class Dytt8(Spider):
    """Crawler for dytt8.net's latest-movies index."""

    def __init__(self):
        super().__init__(url="https://www.dytt8.net/html/gndy/dyzz/index.html")

    def parser(self, text):
        """Extract recent film entries from the index page."""
        soup = BeautifulSoup(text, 'html.parser')
        films = []
        for table in soup.select('div.co_content8 table'):
            anchor = table.select('a.ulink')[0]
            # The first whitespace-separated token of the <font> text
            # carries a 3-character label before the date, which is cut off.
            posted = table.find("font").text.split()[0][3:]
            entry = dict(date=posted,
                         url=urljoin(self.url, anchor.attrs['href']),
                         title=anchor.text)
            age = (self.now - datetime.strptime(posted, "%Y-%m-%d")).days
            if age <= self.keep_days:
                films.append(entry)
        return films
if __name__ == "__main__":
    # Manual smoke test.  Note: __call__ is a coroutine, so this call only
    # creates the coroutine object without awaiting it — the crawl does
    # not actually run here.
    Dytt8()()
| {"/seehd.py": ["/spider.py"], "/main.py": ["/seehd.py", "/dytt8.py", "/piaohua.py"], "/dytt8.py": ["/spider.py"], "/piaohua.py": ["/spider.py"]} |
67,360 | Levstyle/film-crawler | refs/heads/master | /piaohua.py | from bs4 import BeautifulSoup
from spider import Spider
from datetime import datetime
from urllib.parse import urljoin
class PiaoHua(Spider):
    """Crawler for piaohua.com's movie listing."""

    def __init__(self):
        super().__init__(url="https://www.piaohua.com/html/dianying.html")

    def parser(self, text):
        """Extract recent film entries from the listing page."""
        soup = BeautifulSoup(text, 'html.parser')
        listing = soup.select('ul.ul-imgtxt1')[0]
        films = []
        for item in listing.select('li'):
            posted = item.select('span')[0].text
            entry = dict(
                date=posted,
                url=urljoin(self.url, item.find('a').attrs['href']),
                title=item.select('div.txt>h3')[0].text,
            )
            age = (self.now - datetime.strptime(posted, "%Y-%m-%d")).days
            if age <= self.keep_days:
                films.append(entry)
        return films
if __name__ == "__main__":
    # Ad-hoc manual run: fetch the page synchronously and parse it.
    piaohua = PiaoHua()
    piaohua.parser(piaohua.crawl(piaohua.url))
| {"/seehd.py": ["/spider.py"], "/main.py": ["/seehd.py", "/dytt8.py", "/piaohua.py"], "/dytt8.py": ["/spider.py"], "/piaohua.py": ["/spider.py"]} |
67,370 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenarios/object_crash_intersection.py | #!/usr/bin/env python
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Object crash with prior vehicle action scenario:
The scenario realizes the user controlled ego vehicle
moving along the road and encounters a cyclist ahead after taking a right and a left turn.
"""
import py_trees
import carla
from srunner.scenariomanager.atomic_scenario_behavior import *
from srunner.scenariomanager.atomic_scenario_criteria import *
from srunner.scenariomanager.timer import TimeOut
from srunner.scenarios.basic_scenario import *
# Names of the scenario classes provided by this module; consumed by the
# scenario runner when resolving a scenario name to its class.
VEHICLE_TURNING_SCENARIOS = [
    "VehicleTurningRight",
    "VehicleTurningLeft"
]
class VehicleTurningRight(BasicScenario):
    """
    This class holds everything required for a simple object crash
    with prior vehicle action involving a vehicle and a cyclist.
    The ego vehicle is passing through a road and encounters
    a cyclist after taking a right turn.
    """

    category = "VehicleTurning"

    timeout = 90  # scenario timeout in seconds (see _create_behavior docstring)

    # ego vehicle parameters
    _ego_vehicle_velocity_allowed = 30   # max velocity before MaxVelocityTest fails
    _ego_driven_distance = 55            # target distance for DrivenDistanceTest
    _ego_acceptable_distance = 35        # minimum acceptable driven distance

    # other vehicle parameters
    _other_actor_target_velocity = 10    # cyclist crossing speed
    _trigger_distance_from_ego = 14      # ego proximity that triggers the cyclist
    _other_actor_max_throttle = 1.0
    _other_actor_max_brake = 1.0
    # Point where the cyclist's and the ego's paths cross (used by SyncArrival).
    _location_of_collision = carla.Location(x=93.1, y=44.8, z=39)

    def __init__(self, world, ego_vehicle, other_actors, town, randomize=False, debug_mode=False, config=None):
        """
        Setup all relevant parameters and create scenario
        """
        # NOTE(review): 'randomize' and 'config' are accepted but unused here;
        # kept for signature compatibility with the generic scenario loader.
        super(VehicleTurningRight, self).__init__("VehicleTurningRight",
                                                  ego_vehicle,
                                                  other_actors,
                                                  town,
                                                  world,
                                                  debug_mode)

    def _create_behavior(self):
        """
        After invoking this scenario, cyclist will wait for the user
        controlled vehicle to enter the in the trigger distance region,
        the cyclist starts crossing the road once the condition meets,
        ego vehicle has to avoid the crash after a right turn, but
        continue driving after the road is clear.If this does not happen
        within 90 seconds, a timeout stops the scenario.
        """
        # leaf nodes
        trigger_distance = InTriggerDistanceToVehicle(
            self.other_actors[0],
            self.ego_vehicle,
            self._trigger_distance_from_ego)
        stop_other_actor = StopVehicle(
            self.other_actors[0],
            self._other_actor_max_brake)
        timeout_other_actor = TimeOut(5)
        start_other = KeepVelocity(
            self.other_actors[0],
            self._other_actor_target_velocity)
        # Region the cyclist must reach before it is stopped again.
        trigger_other = InTriggerRegion(
            self.other_actors[0],
            85.5, 86.5,
            41, 43)
        stop_other = StopVehicle(
            self.other_actors[0],
            self._other_actor_max_brake)
        timeout_other = TimeOut(3)
        # Make the cyclist arrive at the collision point together with the
        # ego; aborted once the two actors get within 6 units of each other.
        sync_arrival = SyncArrival(
            self.other_actors[0], self.ego_vehicle, self._location_of_collision)
        sync_arrival_stop = InTriggerDistanceToVehicle(self.other_actors[0],
                                                      self.ego_vehicle,
                                                      6)

        # non leaf nodes
        root = py_trees.composites.Parallel(
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        scenario_sequence = py_trees.composites.Sequence()
        keep_velocity_other = py_trees.composites.Parallel(
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        sync_arrival_parallel = py_trees.composites.Parallel(
            "Synchronize arrival times",
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)

        # building the tress
        # NOTE: child order below defines execution order in the Sequence.
        root.add_child(scenario_sequence)
        scenario_sequence.add_child(trigger_distance)
        scenario_sequence.add_child(sync_arrival_parallel)
        scenario_sequence.add_child(stop_other_actor)
        scenario_sequence.add_child(timeout_other_actor)
        scenario_sequence.add_child(keep_velocity_other)
        scenario_sequence.add_child(stop_other)
        scenario_sequence.add_child(timeout_other)
        sync_arrival_parallel.add_child(sync_arrival)
        sync_arrival_parallel.add_child(sync_arrival_stop)
        keep_velocity_other.add_child(start_other)
        keep_velocity_other.add_child(trigger_other)

        return root

    def _create_test_criteria(self):
        """
        A list of all test criteria will be created that is later used
        in parallel behavior tree.
        """
        criteria = []

        max_velocity_criterion = MaxVelocityTest(
            self.ego_vehicle,
            self._ego_vehicle_velocity_allowed)
        collision_criterion = CollisionTest(self.ego_vehicle)
        driven_distance_criterion = DrivenDistanceTest(
            self.ego_vehicle,
            self._ego_driven_distance,
            distance_acceptable=self._ego_acceptable_distance)

        criteria.append(max_velocity_criterion)
        criteria.append(collision_criterion)
        criteria.append(driven_distance_criterion)

        return criteria
class VehicleTurningLeft(BasicScenario):
    """
    This class holds everything required for a simple object crash
    with prior vehicle action involving a vehicle and a cyclist.
    The ego vehicle is passing through a road and encounters
    a cyclist after taking a left turn.
    """

    category = "VehicleTurning"

    timeout = 90  # scenario timeout in seconds

    # ego vehicle parameters
    _ego_vehicle_velocity_allowed = 30   # max velocity before MaxVelocityTest fails
    _ego_driven_distance = 60            # target distance for DrivenDistanceTest
    _ego_acceptable_distance = 40        # minimum acceptable driven distance

    # other vehicle parameters
    _other_actor_target_velocity = 10    # cyclist crossing speed
    _trigger_distance_from_ego = 23      # ego proximity that triggers the cyclist
    _other_actor_max_throttle = 1.0
    _other_actor_max_brake = 1.0
    # Point where the cyclist's and the ego's paths cross (used by SyncArrival).
    _location_of_collision = carla.Location(x=88.6, y=75.8, z=38)

    def __init__(self, world, ego_vehicle, other_actors, town, randomize=False, debug_mode=False, config=None):
        """
        Setup all relevant parameters and create scenario

        'config' is accepted (and ignored, like 'randomize') so the signature
        matches VehicleTurningRight and the generic scenario-instantiation
        code can call both classes identically.
        """
        super(VehicleTurningLeft, self).__init__("VehicleTurningLeft",
                                                 ego_vehicle,
                                                 other_actors,
                                                 town,
                                                 world,
                                                 debug_mode)

    def _create_behavior(self):
        """
        After invoking this scenario, cyclist will wait for the user
        controlled vehicle to enter the in the trigger distance region,
        the cyclist starts crossing the road once the condition meets,
        ego vehicle has to avoid the crash after a left turn, but
        continue driving after the road is clear.If this does not happen
        within 90 seconds, a timeout stops the scenario.
        """
        # leaf nodes
        trigger_distance = InTriggerDistanceToVehicle(
            self.other_actors[0],
            self.ego_vehicle,
            self._trigger_distance_from_ego)
        stop_other_actor = StopVehicle(
            self.other_actors[0],
            self._other_actor_max_brake)
        timeout_other_actor = TimeOut(5)
        start_other = KeepVelocity(
            self.other_actors[0],
            self._other_actor_target_velocity)
        # Region the cyclist must reach before it is stopped again.
        trigger_other = InTriggerRegion(
            self.other_actors[0],
            95, 96,
            78, 79)
        stop_other = StopVehicle(
            self.other_actors[0],
            self._other_actor_max_brake)
        timeout_other = TimeOut(3)
        # Make the cyclist arrive at the collision point together with the
        # ego; aborted once the two actors get within 6 units of each other.
        sync_arrival = SyncArrival(
            self.other_actors[0], self.ego_vehicle, self._location_of_collision)
        sync_arrival_stop = InTriggerDistanceToVehicle(self.other_actors[0],
                                                      self.ego_vehicle,
                                                      6)

        # non leaf nodes
        root = py_trees.composites.Parallel(
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        scenario_sequence = py_trees.composites.Sequence()
        keep_velocity_other = py_trees.composites.Parallel(
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        sync_arrival_parallel = py_trees.composites.Parallel(
            "Synchronize arrival times",
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)

        # building the tress
        # NOTE: child order below defines execution order in the Sequence.
        root.add_child(scenario_sequence)
        scenario_sequence.add_child(trigger_distance)
        scenario_sequence.add_child(sync_arrival_parallel)
        scenario_sequence.add_child(stop_other_actor)
        scenario_sequence.add_child(timeout_other_actor)
        scenario_sequence.add_child(keep_velocity_other)
        scenario_sequence.add_child(stop_other)
        scenario_sequence.add_child(timeout_other)
        sync_arrival_parallel.add_child(sync_arrival)
        sync_arrival_parallel.add_child(sync_arrival_stop)
        keep_velocity_other.add_child(start_other)
        keep_velocity_other.add_child(trigger_other)

        return root

    def _create_test_criteria(self):
        """
        A list of all test criteria will be created that is later used
        in parallel behavior tree.
        """
        criteria = []

        max_velocity_criterion = MaxVelocityTest(
            self.ego_vehicle,
            self._ego_vehicle_velocity_allowed)
        collision_criterion = CollisionTest(self.ego_vehicle)
        driven_distance_criterion = DrivenDistanceTest(
            self.ego_vehicle,
            self._ego_driven_distance,
            distance_acceptable=self._ego_acceptable_distance)

        criteria.append(max_velocity_criterion)
        criteria.append(collision_criterion)
        criteria.append(driven_distance_criterion)

        return criteria
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,371 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenariomanager/traffic_events.py | from enum import Enum
class TrafficEventType(Enum):
    """
    This enum represents different traffic events that occur during driving.
    """
    NORMAL_DRIVING = 0
    COLLISION_STATIC = 1        # collision with a static object
    COLLISION_VEHICLE = 2       # collision with another vehicle
    COLLISION_PEDESTRIAN = 3    # collision with a pedestrian
    ROUTE_DEVIATION = 4
    ROUTE_COMPLETION = 5
    ROUTE_COMPLETED = 6
    TRAFFIC_LIGHT_INFRACTION = 7
    WRONG_WAY_INFRACTION = 8
class TrafficEvent(object):
    """Record of a single traffic event: a type plus optional message/payload."""

    def __init__(self, type, message=None, dict=None):
        """
        Initialize object
        :param type: TrafficEventType defining the type of traffic event
        :param message: optional message to inform users of the event
        :param dict: optional dictionary with arbitrary keys and values
        """
        # NOTE: parameter names shadow builtins but are kept unchanged so
        # keyword callers remain compatible.
        self._type = type
        self._message = message
        self._dict = dict

    def get_type(self):
        """Return the event's TrafficEventType."""
        return self._type

    def get_message(self):
        """Return the user-facing message, or an empty string if unset."""
        return self._message if self._message else ""

    def set_message(self, message):
        """Attach or replace the user-facing message."""
        self._message = message

    def get_dict(self):
        """Return the arbitrary payload dictionary (may be None)."""
        return self._dict

    def set_dict(self, dict):
        """Attach or replace the payload dictionary."""
        self._dict = dict
67,372 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenarios/opposite_vehicle_taking_priority.py | #!/usr/bin/env python
# Copyright (c) 2018-2019 Intel Labs.
# authors: Fabian Oboril (fabian.oboril@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Scenarios in which another (opposite) vehicle 'illegally' takes
priority, e.g. by running a red traffic light.
"""
from __future__ import print_function
import sys
import py_trees
import carla
from srunner.scenariomanager.atomic_scenario_behavior import *
from srunner.scenariomanager.atomic_scenario_criteria import *
from srunner.scenarios.basic_scenario import *
# Names of the scenario classes provided by this module; consumed by the
# scenario runner when resolving a scenario name to its class.
RUNNING_RED_LIGHT_SCENARIOS = [
    "OppositeVehicleRunningRedLight"
]
class OppositeVehicleRunningRedLight(BasicScenario):
    """
    This class holds everything required for a scenario,
    in which an other vehicle takes priority from the ego
    vehicle, by running a red traffic light (while the ego
    vehicle has green)
    """

    category = "RunningRedLight"

    timeout = 180            # Timeout of scenario in seconds

    # ego vehicle parameters
    _ego_max_velocity_allowed = 20       # Maximum allowed velocity [m/s]
    _ego_avg_velocity_expected = 4       # Average expected velocity [m/s]
    _ego_expected_driven_distance = 88   # Expected driven distance [m]
    _ego_distance_to_traffic_light = 15  # Trigger distance to traffic light [m]
    _ego_end_position = carla.Location(x=-3, y=-90, z=0)   # End position
    _ego_distance_to_end_position = 5    # Allowed distance to end position [m]
    _intersection_location = carla.Location(x=-3, y=-150, z=0)

    # other vehicle
    _other_actor_target_velocity = 15      # Target velocity of other vehicle
    _other_actor_max_brake = 1.0           # Maximum brake of other vehicle
    _other_actor_distance = 30             # Distance the other vehicle should drive
    _traffic_light_location = carla.Location(x=-11.5, y=-125.0, z=0.15)
    _traffic_light = None
    # Point where the two vehicles' paths cross (used by SyncArrival).
    _location_of_collision = carla.Location(x=0, y=-135, z=1)

    def __init__(self, world, ego_vehicle, other_actors, town, randomize=False, debug_mode=False, config=None):
        """
        Setup all relevant parameters and create scenario
        and instantiate scenario manager
        """
        # Find the traffic-light actor closest to the hard-coded location
        # (within 1 m); abort the whole run if none exists on this map.
        for actor in world.get_actors().filter('traffic.traffic_light'):
            if actor.get_location().distance(self._traffic_light_location) < 1.0:
                self._traffic_light = actor

        if self._traffic_light is None:
            print("No traffic light for the given location found")
            sys.exit(-1)

        super(OppositeVehicleRunningRedLight, self).__init__("OppositeVehicleRunningRedLight",
                                                             ego_vehicle,
                                                             other_actors,
                                                             town,
                                                             world,
                                                             debug_mode)

    def _create_behavior(self):
        """
        Scenario behavior:
        The other vehicle waits until the ego vehicle is close enough to the
        intersection and that its own traffic light is red. Then, it will start
        driving and 'illegally' cross the intersection. After a short distance
        it should stop again, outside of the intersection. The ego vehicle has
        to avoid the crash, but continue driving after the intersection is clear.
        If this does not happen within 120 seconds, a timeout stops the scenario
        """

        # start condition
        startcondition = InTriggerDistanceToLocation(
            self.ego_vehicle,
            self._intersection_location,
            self._ego_distance_to_traffic_light,
            name="Waiting for start position")

        # wait until traffic light for ego vehicle is green
        wait_for_green = WaitForTrafficLightState(self._traffic_light, "Green")

        # Synchronize both vehicles to meet at the collision point; aborted
        # once they are within 15 units of each other.
        sync_arrival_parallel = py_trees.composites.Parallel(
            "Synchronize arrival times",
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        sync_arrival = SyncArrival(
            self.other_actors[0], self.ego_vehicle, self._location_of_collision)
        sync_arrival_stop = InTriggerDistanceToVehicle(self.other_actors[0],
                                                      self.ego_vehicle,
                                                      15)
        sync_arrival_parallel.add_child(sync_arrival)
        sync_arrival_parallel.add_child(sync_arrival_stop)

        # Keep the other vehicle moving until it has driven the set distance.
        keep_velocity_for_distance = py_trees.composites.Parallel(
            "Keep velocity for distance",
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        keep_velocity = KeepVelocity(
            self.other_actors[0],
            self._other_actor_target_velocity)
        keep_velocity_distance = DriveDistance(
            self.other_actors[0],
            self._other_actor_distance,
            name="Distance")
        keep_velocity_for_distance.add_child(keep_velocity)
        keep_velocity_for_distance.add_child(keep_velocity_distance)

        # finally wait that ego vehicle reached target position
        wait = InTriggerDistanceToLocation(
            self.ego_vehicle,
            self._ego_end_position,
            self._ego_distance_to_end_position,
            name="Waiting for end position")

        # Build behavior tree
        # NOTE: child order defines execution order in the Sequence.
        sequence = py_trees.composites.Sequence("Sequence Behavior")
        sequence.add_child(startcondition)
        sequence.add_child(wait_for_green)
        sequence.add_child(sync_arrival_parallel)
        sequence.add_child(keep_velocity_for_distance)
        sequence.add_child(wait)
        return sequence

    def _create_test_criteria(self):
        """
        A list of all test criteria will be created that is later used
        in parallel behavior tree.
        """
        criteria = []

        max_velocity_criterion = MaxVelocityTest(
            self.ego_vehicle,
            self._ego_max_velocity_allowed,
            optional=True)
        collision_criterion = CollisionTest(self.ego_vehicle)
        driven_distance_criterion = DrivenDistanceTest(
            self.ego_vehicle,
            self._ego_expected_driven_distance)

        criteria.append(max_velocity_criterion)
        criteria.append(collision_criterion)
        criteria.append(driven_distance_criterion)

        # Add the collision and lane checks for all vehicles as well
        for vehicle in self.other_actors:
            collision_criterion = CollisionTest(vehicle)
            criteria.append(collision_criterion)

        return criteria

    def __del__(self):
        # Drop the traffic-light actor reference on teardown.
        self._traffic_light = None
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,373 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/challenge/autoagents/MyAgentV0.py | import carla
import matplotlib.pyplot as plt
import json
import pathlib
import numpy as np
from srunner.challenge.autoagents.autonomous_agent import AutonomousAgent
class MyAgentV0(AutonomousAgent):
    """Minimal demo agent: drives straight at full throttle while recording
    its GPS trace and plotting it with matplotlib for debugging."""

    def setup(self, path_to_conf_file):
        """
        Initialize everything needed by your agent and set the track attribute to the right type:
        Track.ALL_SENSORS : LIDAR, cameras, GPS and speed sensor allowed
        Track.CAMERAS : Only cameras and GPS allowed
        Track.ALL_SENSORS_HDMAP_WAYPOINTS : All sensors and HD Map and waypoints allowed
        Track.SCENE_LAYOUT : No sensors allowed, the agent receives a high-level representation of the scene.
        """
        path_to_conf_file = pathlib.Path(path_to_conf_file)
        if not path_to_conf_file.exists():
            print("ERROR: not found path_to_conf_file = {}".format(path_to_conf_file))
            # SystemExit instead of the site-provided exit(): always available.
            raise SystemExit(1)
        with open(path_to_conf_file, "r") as config_file:
            # NOTE(review): the parsed config is currently unused; it is only
            # loaded to fail fast on invalid JSON.
            config = json.load(config_file)

        # State consumed by run_step(); without this initialization the first
        # call raised AttributeError (trace_path/x/y/ax were never created).
        self.trace_path = []       # full [lat, lon] history
        self.x = []                # first GPS coordinate per step
        self.y = []                # second GPS coordinate per step
        self.fig, self.ax = plt.subplots()

        # self.track = {Track.ALL_SENSORS, Track.CAMERAS, Track.ALL_SENSORS_HDMAP_WAYPOINTS, Track.SCENE_LAYOUT}
        # self.track = Track.ALL_SENSORS_HDMAP_WAYPOINTS

    def sensors(self):
        """
        Define the sensor suite required by the agent
        :return: a list containing the required sensors in the following format:
        [
        {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
        'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'},
        {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
        'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'},
        {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0,
        'id': 'LIDAR'}
        """
        sensors = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
                    'width': 800, 'height': 600, 'fov': 100, 'id': 'Center'},
                   {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
                    'yaw': -45.0, 'width': 800, 'height': 600, 'fov': 100, 'id': 'Left'},
                   {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 45.0,
                    'width': 800, 'height': 600, 'fov': 100, 'id': 'Right'},
                   {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
                    'yaw': -45.0, 'id': 'LIDAR'},
                   {'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'},
                   {'type': 'sensor.speedometer', 'reading_frequency': 25, 'id': 'speed'},
                   {'type': 'sensor.hd_map', 'reading_frequency': 1, 'id': 'hdmap'}
                   ]
        return sensors

    def run_step(self, input_data):
        """Log sensor shapes and the GPS trace, then drive straight ahead.

        :param input_data: dict mapping sensor id -> (frame, data)
        :return: carla.VehicleControl with full throttle and zero steering
        """
        # Debug dump of every sensor reading's frame number and array shape.
        print("=====================>")
        for key, val in input_data.items():
            if hasattr(val[1], 'shape'):
                shape = val[1].shape
                print("[{} -- {:06d}] with shape {}".format(key, val[0], shape))
        print("<=====================")

        gps_input = input_data["GPS"]
        gps_coords = list(gps_input[1])
        print(gps_coords)
        print(self.trace_path)
        self.trace_path.append(gps_coords)
        self.x.append(gps_coords[0])
        self.y.append(gps_coords[1])

        # Debug plot of the trace so far (also saved to disk).
        self.ax.scatter(self.x, self.y)
        plt.show()
        plt.pause(0.1)
        plt.savefig("temp.png")

        # DO SOMETHING SMART

        # RETURN CONTROL
        control = carla.VehicleControl()
        control.steer = 0.0
        control.throttle = 1.0
        control.brake = 0.0
        control.hand_brake = False
        return control
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,374 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/challenge/challenge_evaluator.py | #!/usr/bin/env python
# Copyright (c) 2018-2019 Intel Labs.
# authors: German Ros (german.ros@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
CARLA Challenge Evaluator
Provisional code to evaluate Autonomous Agents for the CARLA Autonomous Driving challenge
"""
from __future__ import print_function
import argparse
from argparse import RawTextHelpFormatter
from datetime import datetime
import importlib
import random
import sys
import time
import carla
from agents.navigation.local_planner import RoadOption
from srunner.challenge.envs.server_manager import ServerManagerBinary, ServerManagerDocker
from srunner.challenge.envs.sensor_interface import CallBack, Speedometer, HDMapReader
from srunner.scenarios.challenge_basic import *
from srunner.scenarios.config_parser import *
from srunner.scenariomanager.scenario_manager import ScenarioManager
# Dictionary of supported scenarios.
# key = Name of config file in configs/
# value = List as defined in the scenario module
SCENARIOS = {
    "ChallengeBasic": CHALLENGE_BASIC_SCENARIOS
}
class ChallengeEvaluator(object):
    """
    Provisional code to evaluate AutonomousAgent performance
    """

    # Class-level defaults; usually overwritten per instance.
    ego_vehicle = None
    actors = []  # NOTE(review): mutable class attribute — shared across instances until rebound

    # Tunable parameters
    client_timeout = 15.0  # in seconds
    wait_for_world = 10.0  # in seconds

    # CARLA world and scenario handlers
    world = None
    manager = None
def __init__(self, args):
self.output_scenario = []
# first we instantiate the Agent
module_name = os.path.basename(args.agent).split('.')[0]
module_spec = importlib.util.spec_from_file_location(module_name, args.agent)
self.module_agent = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(self.module_agent)
self._sensors_list = []
self._hop_resolution = 2.0
# instantiate a CARLA server manager
if args.use_docker:
self._carla_server = ServerManagerDocker({'DOCKER_VERSION': args.docker_version})
else:
self._carla_server = ServerManagerBinary({'CARLA_SERVER': "{}/CarlaUE4.sh".format(args.carla_root)})
def __del__(self):
"""
Cleanup and delete actors, ScenarioManager and CARLA world
"""
self.cleanup(True)
if self.manager is not None:
del self.manager
if self.world is not None:
del self.world
@staticmethod
def get_scenario_class_or_fail(scenario):
"""
Get scenario class by scenario name
If scenario is not supported or not found, exit script
"""
for scenarios in SCENARIOS.values():
if scenario in scenarios:
if scenario in globals():
return globals()[scenario]
print("Scenario '{}' not supported ... Exiting".format(scenario))
sys.exit(-1)
def cleanup(self, ego=False):
"""
Remove and destroy all actors
"""
# We need enumerate here, otherwise the actors are not properly removed
for i, _ in enumerate(self.actors):
if self.actors[i] is not None:
self.actors[i].destroy()
self.actors[i] = None
self.actors = []
for i, _ in enumerate(self._sensors_list):
if self._sensors_list[i] is not None:
self._sensors_list[i].destroy()
self._sensors_list[i] = None
self._sensors_list = []
if ego and self.ego_vehicle is not None:
self.ego_vehicle.destroy()
self.ego_vehicle = None
    def setup_vehicle(self, model, spawn_point, hero=False, autopilot=False, random_location=False):
        """
        Function to setup the most relevant vehicle parameters,
        incl. spawn point and vehicle model.

        :param model: blueprint filter string used to pick the vehicle model
        :param spawn_point: transform used when random_location is False
        :param hero: tag the vehicle's role_name as 'hero' instead of 'scenario'
        :param autopilot: enable/disable the CARLA autopilot on the new vehicle
        :param random_location: try shuffled map spawn points instead of spawn_point
        :return: the spawned vehicle actor
        :raises Exception: if the vehicle could not be spawned
        """
        blueprint_library = self.world.get_blueprint_library()

        # Get vehicle by model
        blueprint = random.choice(blueprint_library.filter(model))
        if hero:
            blueprint.set_attribute('role_name', 'hero')
        else:
            blueprint.set_attribute('role_name', 'scenario')

        if random_location:
            # Try shuffled spawn points until one is free.
            # NOTE(review): if every try_spawn_actor call fails, 'vehicle'
            # ends up None and the Exception below fires; an empty
            # spawn-point list would raise NameError instead — confirm maps
            # always provide spawn points.
            spawn_points = list(self.world.get_map().get_spawn_points())
            random.shuffle(spawn_points)
            for spawn_point in spawn_points:
                vehicle = self.world.try_spawn_actor(blueprint, spawn_point)
                if vehicle:
                    break
        else:
            vehicle = self.world.try_spawn_actor(blueprint, spawn_point)

        if vehicle is None:
            raise Exception(
                "Error: Unable to spawn vehicle {} at {}".format(model, spawn_point))
        else:
            # Let's deactivate the autopilot of the vehicle
            vehicle.set_autopilot(autopilot)

        return vehicle
    def setup_sensors(self, sensors, vehicle):
        """
        Create the sensors defined by the user and attach them to the ego-vehicle
        :param sensors: list of sensors
        :param vehicle: ego vehicle
        :return:
        """
        bp_library = self.world.get_blueprint_library()
        for sensor_spec in sensors:
            # These are the pseudosensors (not spawned)
            if sensor_spec['type'].startswith('sensor.speedometer'):
                # The speedometer pseudo sensor is created directly here
                sensor = Speedometer(vehicle, sensor_spec['reading_frequency'])
            elif sensor_spec['type'].startswith('sensor.hd_map'):
                # The HDMap pseudo sensor is created directly here
                sensor = HDMapReader(vehicle, sensor_spec['reading_frequency'])
            # These are the sensors spawned on the carla world
            else:
                bp = bp_library.find(sensor_spec['type'])
                if sensor_spec['type'].startswith('sensor.camera'):
                    bp.set_attribute('image_size_x', str(sensor_spec['width']))
                    bp.set_attribute('image_size_y', str(sensor_spec['height']))
                    bp.set_attribute('fov', str(sensor_spec['fov']))
                    sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
                                                     z=sensor_spec['z'])
                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
                                                     roll=sensor_spec['roll'],
                                                     yaw=sensor_spec['yaw'])
                elif sensor_spec['type'].startswith('sensor.lidar'):
                    # Override the blueprint's default lidar range.
                    bp.set_attribute('range', '5000')
                    sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
                                                     z=sensor_spec['z'])
                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
                                                     roll=sensor_spec['roll'],
                                                     yaw=sensor_spec['yaw'])
                elif sensor_spec['type'].startswith('sensor.other.gnss'):
                    # GNSS only needs a position; orientation is irrelevant.
                    sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
                                                     z=sensor_spec['z'])
                    sensor_rotation = carla.Rotation()

                # create sensor
                sensor_transform = carla.Transform(sensor_location, sensor_rotation)
                sensor = self.world.spawn_actor(bp, sensor_transform,
                                                vehicle)
            # setup callback
            # NOTE(review): listen() is also invoked on the pseudo sensors;
            # presumably Speedometer/HDMapReader expose the same interface.
            sensor.listen(CallBack(sensor_spec['id'], sensor, self.agent_instance.sensor_interface))
            self._sensors_list.append(sensor)

        # check that all sensors have initialized their data structure
        while not self.agent_instance.all_sensors_ready():
            time.sleep(0.1)
def prepare_actors(self, config):
    """
    Spawn or update all scenario actors according to
    their parameters provided in config
    """
    # Ego vehicle: spawn on first use, afterwards only teleport it.
    if self.ego_vehicle is None:
        self.ego_vehicle = self.setup_vehicle(config.ego_vehicle.model, config.ego_vehicle.transform, hero=True)
    else:
        self.ego_vehicle.set_transform(config.ego_vehicle.transform)

    # Attach the agent's sensor suite to the ego vehicle.
    self.setup_sensors(self.agent_instance.sensors(), self.ego_vehicle)

    # Spawn every remaining (non-ego) actor of the scenario.
    for actor_config in config.other_actors:
        self.actors.append(self.setup_vehicle(actor_config.model,
                                              actor_config.transform,
                                              hero=False,
                                              autopilot=actor_config.autopilot,
                                              random_location=actor_config.random_location))
def analyze_scenario(self, args, config, final_summary=False):
    """
    Provide feedback about success/failure of a scenario
    """
    result, score, return_message = self.manager.analyze_scenario_challenge()
    self.output_scenario.append((result, score, return_message))

    # show results on stdout
    print(return_message)

    # optionally persist the report to a per-scenario timestamped file
    if args.file:
        timestamp = str(datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
        with open(config.name + timestamp + ".txt", "a+") as report_file:
            report_file.write(return_message)
def final_summary(self, args):
    """Aggregate per-scenario results, print the average score and optionally save all messages."""
    scenario_count = len(self.output_scenario)
    total_score = 0.0
    messages = []
    for _, score, message in self.output_scenario:
        total_score += score / float(scenario_count)
        messages.append("\n" + message)

    avg_message = "\n==================================\n==[Avg. score = {:.2f}]".format(total_score)
    avg_message += "\n=================================="
    return_message = avg_message + "".join(messages)
    print(avg_message)

    # save results in file
    if args.file:
        with open("results.txt", "a+") as report_file:
            report_file.write(return_message)
def draw_waypoints(self, waypoints, vertical_shift, persistency=-1):
    """
    Render a debug point for every waypoint, raised by vertical_shift meters.
    :param waypoints: list or iterable container with the waypoints to draw
    :param vertical_shift: height in meters
    :param persistency: lifetime of the debug points in seconds (-1 = default)
    :return:
    """
    offset = carla.Location(z=vertical_shift)
    for waypoint in waypoints:
        self.world.debug.draw_point(waypoint + offset,
                                    size=0.1,
                                    color=carla.Color(0, 255, 0),
                                    life_time=persistency)
def _get_latlon_ref(self):
"""
Convert from waypoints world coordinates to CARLA GPS coordinates
:return: tuple with lat and lon coordinates
"""
xodr = self.world.get_map().to_opendrive()
tree = ET.ElementTree(ET.fromstring(xodr))
lat_ref = 0
lon_ref = 0
for opendrive in tree.iter("OpenDRIVE"):
for header in opendrive.iter("header"):
for georef in header.iter("geoReference"):
if georef:
str_list = georef.text.split(' ')
lat_ref = float(str_list[0].split('=')[1])
lon_ref = float(str_list[1].split('=')[1])
else:
lat_ref = 42.0
lon_ref = 2.0
return lat_ref, lon_ref
def _location_to_gps(self, lat_ref, lon_ref, location):
"""
Convert from world coordinates to GPS coordinates
:param lat_ref: latitude reference for the current map
:param lon_ref: longitude reference for the current map
:param location: location to translate
:return: dictionary with lat, lon and height
"""
EARTH_RADIUS_EQUA = 6378137.0
scale = math.cos(lat_ref * math.pi / 180.0)
mx = scale * lon_ref * math.pi * EARTH_RADIUS_EQUA / 180.0
my = scale * EARTH_RADIUS_EQUA * math.log(math.tan((90.0 + lat_ref) * math.pi / 360.0))
mx += location.x
my += location.y
lon = mx * 180.0 / (math.pi * EARTH_RADIUS_EQUA * scale)
lat = 360.0 * math.atan(math.exp(my / (EARTH_RADIUS_EQUA * scale))) / math.pi - 90.0
z = location.z
return {'lat':lat, 'lon':lon, 'z':z}
def compress_route(self, route, start, end, threshold=10.0):
    """
    Reduce a dense route to sparse keypoints: a waypoint is kept whenever its
    road option differs from the last kept one, or it lies more than
    threshold meters from the last kept waypoint. Start and end are always
    included (tagged LANEFOLLOW).
    """
    kept = [(start, RoadOption.LANEFOLLOW)]
    last_waypoint = start
    last_connection = RoadOption.LANEFOLLOW
    for waypoint, connection in route:
        if connection != last_connection or last_waypoint.distance(waypoint) > threshold:
            kept.append((waypoint, connection))
            last_waypoint = waypoint
            last_connection = connection
    kept.append((end, RoadOption.LANEFOLLOW))
    return kept
def location_route_to_gps(self, route, lat_ref, lon_ref):
    """Translate every (location, connection) pair of the route into GPS space."""
    return [(self._location_to_gps(lat_ref, lon_ref, location), connection)
            for location, connection in route]
def run(self, args):
    """
    Run all scenarios according to provided commandline args
    """
    # Prepare CARLA server
    self._carla_server.reset(args.host, args.port)
    self._carla_server.wait_until_ready()

    # Setup and run the scenarios for repetition times
    for _ in range(int(args.repetitions)):
        # Load the scenario configurations provided in the config file
        scenario_configurations = None
        if args.scenario.startswith("group:"):
            # Group mode: the parser resolves every scenario of the class.
            scenario_configurations = parse_scenario_configuration(args.scenario, args.scenario)
        else:
            scenario_config_file = find_scenario_config(args.scenario)
            if scenario_config_file is None:
                print("Configuration for scenario {} cannot be found!".format(args.scenario))
                continue
            scenario_configurations = parse_scenario_configuration(scenario_config_file, args.scenario)

        # Execute each configuration
        for config in scenario_configurations:
            # create agent instance
            self.agent_instance = getattr(self.module_agent, self.module_agent.__name__)(args.config)

            # Prepare scenario
            print("Preparing scenario: " + config.name)
            scenario_class = ChallengeEvaluator.get_scenario_class_or_fail(config.type)
            client = carla.Client(args.host, int(args.port))
            client.set_timeout(self.client_timeout)

            # Once we have a client we can retrieve the world that is currently
            # running.
            self.world = client.load_world(config.town)

            # Wait for the world to be ready
            self.world.wait_for_tick(self.wait_for_world)

            # Create scenario manager
            self.manager = ScenarioManager(self.world, args.debug)

            try:
                self.prepare_actors(config)
                # Hand the agent its global plan in GPS space: compress the
                # dense route, then project it with the map's datum.
                lat_ref, lon_ref = self._get_latlon_ref()
                compact_route = self.compress_route(config.route.data,
                                                    config.ego_vehicle.transform.location,
                                                    config.target.transform.location)
                gps_route = self.location_route_to_gps(compact_route, lat_ref, lon_ref)
                self.agent_instance.set_global_plan(gps_route)
                scenario = scenario_class(self.world,
                                          self.ego_vehicle,
                                          self.actors,
                                          config.town,
                                          args.randomize,
                                          args.debug,
                                          config)
            except Exception as exception:
                # A setup failure skips this configuration, not the whole run.
                print("The scenario cannot be loaded")
                print(exception)
                self.cleanup(ego=True)
                continue

            # Load scenario and run it
            self.manager.load_scenario(scenario)

            # debug
            if args.route_visible:
                locations_route, _ = zip(*config.route.data)
                self.draw_waypoints(locations_route, vertical_shift=1.0, persistency=scenario.timeout)

            self.manager.run_scenario(self.agent_instance)

            # Provide outputs if required
            self.analyze_scenario(args, config)

            # Stop scenario and cleanup
            self.manager.stop_scenario()
            del scenario

            self.cleanup(ego=True)
            self.agent_instance.destroy()

    self.final_summary(args)

    # stop CARLA server
    self._carla_server.stop()
if __name__ == '__main__':
    DESCRIPTION = ("CARLA AD Challenge evaluation: evaluate your Agent in CARLA scenarios\n")

    def _str_to_bool(value):
        # BUG FIX: argparse's `type=bool` is a trap — bool('False') is True
        # because any non-empty string is truthy. Parse the usual spellings
        # explicitly so '--use-docker False' actually disables docker.
        return str(value).strip().lower() in ('true', '1', 'yes', 'y')

    PARSER = argparse.ArgumentParser(description=DESCRIPTION,
                                     formatter_class=RawTextHelpFormatter)
    PARSER.add_argument('--host', default='localhost',
                        help='IP of the host server (default: localhost)')
    PARSER.add_argument('--port', default='2000',
                        help='TCP port to listen to (default: 2000)')
    PARSER.add_argument("--use-docker", type=_str_to_bool, help="Use docker to run CARLA?", default=False)
    PARSER.add_argument('--docker-version', type=str, help='Docker version to use for CARLA server', default="0.9.3")
    PARSER.add_argument("-a", "--agent", type=str, help="Path to Agent's py file to evaluate")
    PARSER.add_argument("--config", type=str, help="Path to Agent's configuration file", default="")
    PARSER.add_argument('--route-visible', action="store_true", help='Run with a visible route')
    PARSER.add_argument('--debug', action="store_true", help='Run with debug output')
    PARSER.add_argument('--file', action="store_true", help='Write results into a txt file')
    # pylint: disable=line-too-long
    PARSER.add_argument(
        '--scenario', help='Name of the scenario to be executed. Use the preposition \'group:\' to run all scenarios of one class, e.g. ControlLoss or FollowLeadingVehicle')
    # pylint: enable=line-too-long
    PARSER.add_argument('--randomize', action="store_true", help='Scenario parameters are randomized')
    PARSER.add_argument('--repetitions', default=1, help='Number of scenario executions')
    PARSER.add_argument('--list', action="store_true", help='List all supported scenarios and exit')
    PARSER.add_argument('--list_class', action="store_true", help='List all supported scenario classes and exit')
    ARGUMENTS = PARSER.parse_args()

    CARLA_ROOT = os.environ.get('CARLA_ROOT')
    ROOT_SCENARIO_RUNNER = os.environ.get('ROOT_SCENARIO_RUNNER')

    if not CARLA_ROOT:
        print("Error. CARLA_ROOT not found. Please run setup_environment.sh first.")
        # BUG FIX: exit non-zero on a hard error so callers/scripts detect it.
        sys.exit(1)

    if not ROOT_SCENARIO_RUNNER:
        print("Error. ROOT_SCENARIO_RUNNER not found. Please run setup_environment.sh first.")
        sys.exit(1)

    ARGUMENTS.carla_root = CARLA_ROOT

    if ARGUMENTS.list:
        print("Currently the following scenarios are supported:")
        print(*get_list_of_scenarios(), sep='\n')
        sys.exit(0)

    if ARGUMENTS.list_class:
        print("Currently the following scenario classes are supported:")
        print(*SCENARIOS.keys(), sep='\n')
        sys.exit(0)

    if ARGUMENTS.scenario is None:
        print("Please specify a scenario using '--scenario SCENARIONAME'\n\n")
        PARSER.print_help(sys.stdout)
        sys.exit(0)

    # BUG FIX: pre-bind so the finally block cannot raise NameError (masking
    # the real exception) when the ChallengeEvaluator constructor itself fails.
    challenge_evaluator = None
    try:
        challenge_evaluator = ChallengeEvaluator(ARGUMENTS)
        challenge_evaluator.run(ARGUMENTS)
    finally:
        del challenge_evaluator
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,375 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/challenge/envs/sensor_interface.py | import copy
import logging
import numpy as np
import os
import time
from threading import Thread
import carla
def threaded(fn):
    """
    Decorator that runs *fn* in a background daemon thread.

    The wrapped function returns immediately with the started
    threading.Thread object instead of fn's own result.
    """
    import functools  # local import: keeps the module's import block untouched

    @functools.wraps(fn)  # preserve fn's name/docstring on the wrapper
    def wrapper(*args, **kwargs):
        # daemon=True so a lingering producer loop never blocks interpreter
        # shutdown (Thread.setDaemon is deprecated in favour of the flag).
        thread = Thread(target=fn, args=args, kwargs=kwargs, daemon=True)
        thread.start()
        return thread

    return wrapper
class HDMapMeasurement(object):
    """Container pairing one HD-map reading with the frame number it was produced at."""

    def __init__(self, data, frame_number):
        # data: dict built by HDMapReader.__call__ (map file path + vehicle transform)
        self.data = data
        self.frame_number = frame_number
class HDMapReader(object):
    """
    Pseudo sensor (not spawned in the CARLA world) that periodically reports
    the path to the current map's HD-map point cloud together with the ego
    vehicle's transform. A background thread delivers HDMapMeasurement
    objects to the listener registered via listen().
    """

    def __init__(self, vehicle, reading_frequency=1.0):
        # Vehicle whose pose is reported with every reading.
        self._vehicle = vehicle
        # Readings per second delivered to the listener (Hz).
        self._reading_frequency = reading_frequency
        # Root of the CARLA installation; used to locate the HDMaps folder.
        self._CARLA_ROOT = os.getenv('CARLA_ROOT', "./")
        # Consumer callback registered via listen(); None until then.
        self._callback = None
        self._frame_number = 0
        # Producer loop keeps running while this flag is True (see destroy()).
        self._run_ps = True
        # NOTE: run() is decorated with @threaded, so this starts the
        # producer thread immediately on construction.
        self.run()

    def __call__(self):
        """Build one reading: HD-map file path plus the current vehicle pose."""
        map_name = os.path.basename(self._vehicle.get_world().get_map().name)
        transform = self._vehicle.get_transform()
        return {'map_file': "{}/HDMaps/{}.ply".format(self._CARLA_ROOT, map_name),
                'transform': {'x': transform.location.x,
                              'y': transform.location.y,
                              'z': transform.location.z,
                              'yaw': transform.rotation.yaw,
                              'pitch': transform.rotation.pitch,
                              'roll': transform.rotation.roll}
                }

    @threaded
    def run(self):
        # Producer loop: once a consumer is listening, emit an HDMapMeasurement
        # at most every 1/reading_frequency seconds.
        latest_read = time.time()
        while self._run_ps:
            if self._callback is not None:
                capture = time.time()
                if capture - latest_read > (1 / self._reading_frequency):
                    self._callback(HDMapMeasurement(self.__call__(), self._frame_number))
                    self._frame_number += 1
                    latest_read = time.time()
            else:
                # Nobody listening yet; avoid busy-waiting.
                time.sleep(0.001)

    def listen(self, callback):
        # Tell that this function receives what the producer does.
        self._callback = callback

    def destroy(self):
        # Stop the producer thread's loop.
        self._run_ps = False
class SpeedMeasurement(object):
    """Container pairing a forward-speed reading with the frame number it was produced at."""

    def __init__(self, data, frame_number):
        # data: scalar forward speed produced by Speedometer._get_forward_speed()
        self.data = data
        self.frame_number = frame_number
class Speedometer(object):
    """
    Speed pseudo sensor that gets the current speed of the vehicle.
    This sensor is not placed at the CARLA environment. It is
    only an asynchronous interface to the forward speed.
    """

    def __init__(self, vehicle, reading_frequency):
        # The vehicle where the class reads the speed
        self._vehicle = vehicle
        # How often do you look at your speedometer in hz
        self._reading_frequency = reading_frequency
        # Consumer callback registered via listen(); None until then.
        self._callback = None
        # Counts the frames
        self._frame_number = 0
        # Producer loop keeps running while this flag is True (see destroy()).
        self._run_ps = True
        # NOTE: produce_speed() is decorated with @threaded, so this starts
        # the producer thread immediately on construction.
        self.produce_speed()

    def _get_forward_speed(self):
        """ Convert the vehicle transform directly to forward speed """
        velocity = self._vehicle.get_velocity()
        transform = self._vehicle.get_transform()
        vel_np = np.array([velocity.x, velocity.y, velocity.z])
        pitch = np.deg2rad(transform.rotation.pitch)
        yaw = np.deg2rad(transform.rotation.yaw)
        # Unit vector along the vehicle heading; the dot product projects the
        # velocity onto it, so driving backwards yields a negative speed.
        orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)])
        speed = np.dot(vel_np, orientation)
        return speed

    @threaded
    def produce_speed(self):
        # Producer loop: once a consumer is listening, emit a SpeedMeasurement
        # at most every 1/reading_frequency seconds.
        latest_speed_read = time.time()
        while self._run_ps:
            if self._callback is not None:
                capture = time.time()
                if capture - latest_speed_read > (1 / self._reading_frequency):
                    self._callback(SpeedMeasurement(self._get_forward_speed(), self._frame_number))
                    self._frame_number += 1
                    latest_speed_read = time.time()
            else:
                # Nobody listening yet; avoid busy-waiting.
                time.sleep(0.001)

    def listen(self, callback):
        # Tell that this function receives what the producer does.
        self._callback = callback

    def destroy(self):
        # Stop the producer thread's loop.
        self._run_ps = False
class CallBack(object):
    """
    Callable bridging a CARLA sensor (or pseudo sensor) and the agent's
    SensorInterface: it parses each raw measurement and pushes the result
    into the data provider under the sensor's tag.
    """

    def __init__(self, tag, sensor, data_provider):
        self._tag = tag
        self._data_provider = data_provider
        self._data_provider.register_sensor(tag, sensor)

    def __call__(self, data):
        # Dispatch on the concrete measurement type; first match wins.
        dispatch_table = (
            (carla.Image, self._parse_image_cb),
            (carla.LidarMeasurement, self._parse_lidar_cb),
            (carla.GnssEvent, self._parse_gnss_cb),
            (SpeedMeasurement, self._parse_speedometer),
            (HDMapMeasurement, self._parse_hdmap),
        )
        for measurement_type, parser in dispatch_table:
            if isinstance(data, measurement_type):
                parser(data, self._tag)
                return
        logging.error('No callback method for this sensor.')

    def _parse_image_cb(self, image, tag):
        # Raw BGRA bytes -> (H, W, 3) RGB array: drop alpha, flip channels.
        raw = copy.deepcopy(np.frombuffer(image.raw_data, dtype=np.dtype("uint8")))
        bgra = np.reshape(raw, (image.height, image.width, 4))
        rgb = bgra[:, :, :3][:, :, ::-1]
        self._data_provider.update_sensor(tag, rgb, image.frame_number)

    def _parse_lidar_cb(self, lidar_data, tag):
        # Flat float32 buffer -> (N, 3) point cloud.
        raw = copy.deepcopy(np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4')))
        cloud = np.reshape(raw, (int(raw.shape[0] / 3), 3))
        self._data_provider.update_sensor(tag, cloud, lidar_data.frame_number)

    def _parse_gnss_cb(self, gnss_data, tag):
        # GNSS fix as a compact float32 triple.
        fix = np.array([gnss_data.latitude,
                        gnss_data.longitude,
                        gnss_data.altitude], dtype=np.float32)
        self._data_provider.update_sensor(tag, fix, gnss_data.frame_number)

    def _parse_speedometer(self, speed, tag):
        # Pseudo sensor payload is already a plain scalar.
        self._data_provider.update_sensor(tag, speed.data, speed.frame_number)

    def _parse_hdmap(self, hd_package, tag):
        # Pseudo sensor payload is already a plain dict.
        self._data_provider.update_sensor(tag, hd_package.data, hd_package.frame_number)
class SensorInterface(object):
    """
    Registry of sensors and their most recent measurements, keyed by tag.
    """

    def __init__(self):
        self._sensors_objects = {}
        self._data_buffers = {}
        self._timestamps = {}

    def register_sensor(self, tag, sensor):
        """Add a new sensor under *tag*; a tag may only be registered once."""
        if tag in self._sensors_objects:
            raise ValueError("Duplicated sensor tag [{}]".format(tag))
        self._sensors_objects[tag] = sensor
        # No measurement yet: empty buffer and sentinel timestamp.
        self._data_buffers[tag] = None
        self._timestamps[tag] = -1

    def update_sensor(self, tag, data, timestamp):
        """Store the latest measurement for a previously registered tag."""
        if tag not in self._sensors_objects:
            raise ValueError("The sensor with tag [{}] has not been created!".format(tag))
        self._data_buffers[tag] = data
        self._timestamps[tag] = timestamp

    def all_sensors_ready(self):
        """True once every registered sensor has produced at least one reading."""
        return all(self._data_buffers[tag] is not None for tag in self._sensors_objects)

    def get_data(self):
        """Snapshot of all sensors as {tag: (timestamp, deep-copied data)}."""
        return {tag: (self._timestamps[tag], copy.deepcopy(self._data_buffers[tag]))
                for tag in self._sensors_objects}
67,376 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenarios/challenge_basic.py | #!/usr/bin/env python
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Basic CARLA Autonomous Driving training scenario
"""
import py_trees
from srunner.scenariomanager.atomic_scenario_behavior import *
from srunner.scenariomanager.atomic_scenario_criteria import *
from srunner.scenarios.basic_scenario import *
CHALLENGE_BASIC_SCENARIOS = ["ChallengeBasic"]
class ChallengeBasic(BasicScenario):
    """
    Dummy challenge scenario: the ego vehicle simply idles while a set of
    route / collision / traffic-rule criteria is evaluated in parallel.
    """
    category = "ChallengeBasic"

    radius = 10.0  # meters
    timeout = 300  # Timeout of scenario in seconds

    def __init__(self, world, ego_vehicle, other_actors, town, randomize=False, debug_mode=False, config=None):
        """
        Setup all relevant parameters and create scenario
        """
        self.config = config
        # Target and route are optional entries of the configuration.
        self.target = getattr(self.config, 'target', None)
        self.route = self.config.route.data if hasattr(self.config, 'route') else None

        super(ChallengeBasic, self).__init__("ChallengeBasic", ego_vehicle, other_actors, town, world, debug_mode, True)

    def _create_behavior(self):
        """
        Basic behavior do nothing, i.e. Idle
        """
        sequence = py_trees.composites.Sequence("Sequence Behavior")
        sequence.add_child(Idle())
        return sequence

    def _create_test_criteria(self):
        """
        Build the parallel criteria tree evaluated alongside the behavior.
        """
        collision_criterion = CollisionTest(self.ego_vehicle, terminate_on_failure=True)
        target_criterion = InRadiusRegionTest(self.ego_vehicle,
                                              x=self.target.transform.location.x,
                                              y=self.target.transform.location.y,
                                              radius=self.radius)
        route_criterion = InRouteTest(self.ego_vehicle,
                                      radius=30.0,
                                      route=self.route,
                                      offroad_max=20,
                                      terminate_on_failure=True)
        completion_criterion = RouteCompletionTest(self.ego_vehicle, route=self.route)
        wrong_way_criterion = WrongLaneTest(self.ego_vehicle)
        red_light_criterion = RunningRedLightTest(self.ego_vehicle)

        parallel_criteria = py_trees.composites.Parallel("group_criteria",
                                                         policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        for criterion in (completion_criterion, collision_criterion, target_criterion,
                          route_criterion, wrong_way_criterion, red_light_criterion):
            parallel_criteria.add_child(criterion)

        return parallel_criteria
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,377 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenarios/config_parser.py | #!/usr/bin/env python
# Copyright (c) 2019 Intel Labs.
# authors: Fabian Oboril (fabian.oboril@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides a parser for scenario configuration files
"""
import glob
import os
import xml.etree.ElementTree as ET
import carla
from agents.navigation.local_planner import RoadOption
class RouteConfiguration(object):
    """
    Parsed route: a list of (carla.Location, RoadOption) tuples read from
    the <waypoint> children of the given XML node.
    """

    def __init__(self, node):
        self.data = []
        for waypoint in node.iter("waypoint"):
            coords = [float(set_attrib(waypoint, axis, 0)) for axis in ('x', 'y', 'z')]
            option_name = set_attrib(waypoint, 'connection', '')
            # 'connection' is serialized as e.g. "RoadOption.LANEFOLLOW".
            connection = RoadOption[option_name.split('.')[1]]
            self.data.append((carla.Location(*coords), connection))
class TargetConfiguration(object):
    """
    Target (goal) location of a scenario, stored as a carla.Transform.
    """

    transform = None

    def __init__(self, node):
        coords = {axis: float(set_attrib(node, axis, 0)) for axis in ('x', 'y', 'z')}
        self.transform = carla.Transform(carla.Location(x=coords['x'], y=coords['y'], z=coords['z']))
class ActorConfiguration(object):
    """
    This class provides the basic actor configuration for a
    scenario:
    - Location and rotation (transform)
    - Model (e.g. Lincoln MKZ2017)
    - optional autopilot / random-spawn flags
    """

    transform = None
    model = None
    autopilot = False
    random_location = False

    def __init__(self, node):
        spawn_x = float(set_attrib(node, 'x', 0))
        spawn_y = float(set_attrib(node, 'y', 0))
        spawn_z = float(set_attrib(node, 'z', 0))
        spawn_yaw = float(set_attrib(node, 'yaw', 0))

        # Mere presence of these attributes enables the flag, whatever value.
        if 'random_location' in node.keys():
            self.random_location = True
        if 'autopilot' in node.keys():
            self.autopilot = True

        self.transform = carla.Transform(carla.Location(x=spawn_x, y=spawn_y, z=spawn_z),
                                         carla.Rotation(yaw=spawn_yaw))
        self.model = set_attrib(node, 'model', 'vehicle.*')
class ScenarioConfiguration(object):
    """
    This class provides a basic scenario configuration incl.:
    - configurations for all actors
    - town, where the scenario should be executed
    - name of the scenario (e.g. ControlLoss_1)
    - type is the class of scenario (e.g. ControlLoss)
    """

    def __init__(self):
        # BUG FIX: the original used class-level attributes, including the
        # mutable `other_actors = []`, which is one list shared by every
        # instance that never assigned its own. Initialize per instance.
        self.ego_vehicle = None     # ActorConfiguration of the hero vehicle
        self.other_actors = []      # list of ActorConfiguration
        self.town = None            # name of the CARLA town/map
        self.name = None            # scenario instance name (e.g. ControlLoss_1)
        self.type = None            # scenario class name (e.g. ControlLoss)
        self.target = None          # TargetConfiguration (goal location)
        self.route = None           # RouteConfiguration (waypoint list)
def set_attrib(node, key, default):
    """
    Parse XML key for a given node
    If key does not exist, use default value
    """
    return node.attrib.get(key, default)
def parse_scenario_configuration(file_name, scenario_name):
    """
    Parse scenario configuration file and provide a list of
    ScenarioConfigurations @return

    If scenario_name starts with "group:" all scenarios within
    the config file will be returned. Otherwise only the scenario,
    that matches the scenario_name.
    """
    group_mode = scenario_name.startswith("group:")
    if group_mode:
        scenario_name = scenario_name[6:]
        file_name = scenario_name

    scenario_config_file = os.getenv('ROOT_SCENARIO_RUNNER', "./") + "/srunner/configs/" + file_name + ".xml"

    scenario_configurations = []
    for scenario in ET.parse(scenario_config_file).iter("scenario"):
        new_config = ScenarioConfiguration()
        new_config.town = set_attrib(scenario, 'town', None)
        new_config.name = set_attrib(scenario, 'name', None)
        new_config.type = set_attrib(scenario, 'type', None)
        new_config.other_actors = []

        for ego_vehicle in scenario.iter("ego_vehicle"):
            new_config.ego_vehicle = ActorConfiguration(ego_vehicle)
        for target in scenario.iter("target"):
            new_config.target = TargetConfiguration(target)
        for route in scenario.iter("route"):
            new_config.route = RouteConfiguration(route)
        for other_actor in scenario.iter("other_actor"):
            new_config.other_actors.append(ActorConfiguration(other_actor))

        # In group mode every scenario is kept, otherwise only the named one.
        if group_mode or new_config.name == scenario_name:
            scenario_configurations.append(new_config)

    return scenario_configurations
def get_list_of_scenarios():
    """
    Parse *all* config files and provide a list with all scenarios @return
    """
    scenarios = []
    config_root = os.getenv('ROOT_SCENARIO_RUNNER', "./")
    for config_file in glob.glob("{}/srunner/configs/*.xml".format(config_root)):
        for scenario in ET.parse(config_file).iter("scenario"):
            scenarios.append(set_attrib(scenario, 'name', None))
    return scenarios
def find_scenario_config(scenario_name):
    """
    Parse *all* config files and find first match for scenario config
    """
    config_root = os.getenv('ROOT_SCENARIO_RUNNER', "./")
    for config_file in glob.glob("{}/srunner/configs/*.xml".format(config_root)):
        for scenario in ET.parse(config_file).iter("scenario"):
            if set_attrib(scenario, 'name', None) == scenario_name:
                # Return the config file's basename without its extension.
                return os.path.basename(config_file).split(".")[0]
    return None
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,378 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenariomanager/atomic_scenario_behavior.py | #!/usr/bin/env python
# Copyright (c) 2018-2019 Intel Labs.
# authors: Fabian Oboril (fabian.oboril@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides all atomic scenario behaviors required to realize
complex, realistic scenarios such as "follow a leading vehicle", "lane change",
etc.
The atomic behaviors are implemented with py_trees.
"""
import carla
import py_trees
from agents.navigation.roaming_agent import *
from agents.navigation.basic_agent import *
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
EPSILON = 0.001
def calculate_distance(location, other_location):
    """
    Distance between two locations.

    Note: this is the straight-line distance, not the (shortest) route
    distance along the road network, so any time-to-arrival estimate based
    on it is only an approximation.
    """
    return location.distance(other_location)
class AtomicBehavior(py_trees.behaviour.Behaviour):
    """
    Base class for all atomic behaviors used to setup a scenario

    Important parameters:
    - name: Name of the atomic behavior
    """

    def __init__(self, name):
        super(AtomicBehavior, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self.name = name

    def setup(self, unused_timeout=15):
        # py_trees setup hook; atomic behaviors need no preparation.
        self.logger.debug("%s.setup()" % (self.__class__.__name__))
        return True

    def initialise(self):
        # Called by py_trees whenever the behavior is (re)entered.
        self.logger.debug("%s.initialise()" % (self.__class__.__name__))

    def terminate(self, new_status):
        # Called by py_trees when the behavior finishes or is preempted.
        self.logger.debug("%s.terminate()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
class StandStill(AtomicBehavior):
    """
    Condition behavior: succeeds once the controlled actor has (almost)
    come to a stop.
    """

    def __init__(self, actor, name):
        """
        Setup actor
        """
        super(StandStill, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._actor = actor

    def update(self):
        """
        RUNNING while the actor is moving; SUCCESS once its speed drops
        below EPSILON.
        """
        if CarlaDataProvider.get_velocity(self._actor) < EPSILON:
            new_status = py_trees.common.Status.SUCCESS
        else:
            new_status = py_trees.common.Status.RUNNING
        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status
class InTriggerRegion(AtomicBehavior):
    """
    Trigger condition that succeeds once the actor is inside a rectangular
    region given by [min_x, min_y] and [max_x, max_y].
    """

    def __init__(self, actor, min_x, max_x, min_y, max_y, name="TriggerRegion"):
        """
        Setup trigger region (rectangle provided by
        [min_x,min_y] and [max_x,max_y]
        """
        super(InTriggerRegion, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._actor = actor
        self._min_x = min_x
        self._max_x = max_x
        self._min_y = min_y
        self._max_y = max_y

    def update(self):
        """
        RUNNING while the actor is outside the region (or while no location
        is available yet), SUCCESS once it is inside.
        """
        new_status = py_trees.common.Status.RUNNING

        location = CarlaDataProvider.get_location(self._actor)
        if location is None:
            return new_status

        outside_region = (location.x < self._min_x or location.x > self._max_x or
                          location.y < self._min_y or location.y > self._max_y)
        if not outside_region:
            new_status = py_trees.common.Status.SUCCESS

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status
class InTriggerDistanceToVehicle(AtomicBehavior):
    """
    Trigger condition that succeeds once two actors are closer to each
    other than a given distance.
    """

    def __init__(self, other_actor, actor, distance, name="TriggerDistanceToVehicle"):
        """
        Setup trigger distance
        """
        super(InTriggerDistanceToVehicle, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._other_actor = other_actor
        self._actor = actor
        self._distance = distance

    def update(self):
        """
        RUNNING while the gap between the two actors exceeds the trigger
        distance (or while a location is unavailable), SUCCESS afterwards.
        """
        new_status = py_trees.common.Status.RUNNING

        location = CarlaDataProvider.get_location(self._actor)
        reference_location = CarlaDataProvider.get_location(self._other_actor)
        if location is None or reference_location is None:
            return new_status

        gap = calculate_distance(location, reference_location)
        if gap < self._distance:
            new_status = py_trees.common.Status.SUCCESS

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status
class InTriggerDistanceToLocation(AtomicBehavior):
    """
    Trigger condition that succeeds once the actor is closer than a given
    distance to a fixed target location.
    """

    def __init__(self, actor, target_location, distance, name="InTriggerDistanceToLocation"):
        """
        Setup trigger distance
        """
        super(InTriggerDistanceToLocation, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._target_location = target_location
        self._actor = actor
        self._distance = distance

    def update(self):
        """
        RUNNING while the actor is farther away than the trigger distance
        (or while no location is available), SUCCESS afterwards.
        """
        new_status = py_trees.common.Status.RUNNING

        location = CarlaDataProvider.get_location(self._actor)
        if location is None:
            return new_status

        remaining = calculate_distance(location, self._target_location)
        if remaining < self._distance:
            new_status = py_trees.common.Status.SUCCESS

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status
class InTriggerDistanceToNextIntersection(AtomicBehavior):
    """
    This class contains the trigger (condition) for a distance to the
    next intersection of a scenario
    """

    def __init__(self, actor, distance, name="InTriggerDistanceToNextIntersection"):
        """
        Setup trigger distance
        """
        super(InTriggerDistanceToNextIntersection, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._actor = actor
        self._distance = distance
        self._map = self._actor.get_world().get_map()

        # Walk the lane forward until the first intersection waypoint and
        # remember its location as the trigger reference point
        waypoint = self._map.get_waypoint(self._actor.get_location())
        while not waypoint.is_intersection:
            waypoint = waypoint.next(1)[-1]
        self._final_location = waypoint.transform.location

    def update(self):
        """
        Check if the actor is within trigger distance to the intersection
        """
        new_status = py_trees.common.Status.RUNNING

        location = CarlaDataProvider.get_location(self._actor)
        # Robustness fix: the data provider may not have a location yet;
        # without this guard get_waypoint(None) would fail. The other
        # trigger conditions in this module use the same check.
        if location is None:
            return new_status

        current_waypoint = self._map.get_waypoint(location)
        distance = calculate_distance(current_waypoint.transform.location, self._final_location)

        if distance < self._distance:
            new_status = py_trees.common.Status.SUCCESS

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status
class TriggerVelocity(AtomicBehavior):
    """
    Trigger condition that succeeds as soon as the actor is at least as
    fast as the requested target velocity.
    """

    def __init__(self, actor, target_velocity, name="TriggerVelocity"):
        """
        Setup trigger velocity
        """
        super(TriggerVelocity, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._actor = actor
        self._target_velocity = target_velocity

    def update(self):
        """
        SUCCESS once the actor's velocity is within EPSILON of (or above)
        the target velocity, RUNNING otherwise.
        """
        new_status = py_trees.common.Status.RUNNING

        actual_velocity = CarlaDataProvider.get_velocity(self._actor)
        if self._target_velocity - actual_velocity < EPSILON:
            new_status = py_trees.common.Status.SUCCESS

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status
class InTimeToArrivalToLocation(AtomicBehavior):
    """
    Trigger condition that succeeds once the actor could reach a given
    location within a given time, based on its current speed.
    """

    _max_time_to_arrival = float('inf')  # fallback time to arrival in seconds

    def __init__(self, actor, time, location, name="TimeToArrival"):
        """
        Setup parameters
        """
        super(InTimeToArrivalToLocation, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._actor = actor
        self._time = time
        self._target_location = location

    def update(self):
        """
        SUCCESS once distance / velocity drops below the configured time,
        RUNNING otherwise (and while no location is available).
        """
        new_status = py_trees.common.Status.RUNNING

        current_location = CarlaDataProvider.get_location(self._actor)
        if current_location is None:
            return new_status

        distance = calculate_distance(current_location, self._target_location)
        velocity = CarlaDataProvider.get_velocity(self._actor)

        # if velocity is too small, simply use a large time to arrival
        time_to_arrival = distance / velocity if velocity > EPSILON else self._max_time_to_arrival

        if time_to_arrival < self._time:
            new_status = py_trees.common.Status.SUCCESS

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status
class InTimeToArrivalToVehicle(AtomicBehavior):
    """
    Trigger condition that succeeds once the actor could close the gap to
    another actor within a given time, based on current speeds.
    """

    _max_time_to_arrival = float('inf')  # fallback time to arrival in seconds

    def __init__(self, other_actor, actor, time, name="TimeToArrival"):
        """
        Setup parameters
        """
        super(InTimeToArrivalToVehicle, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._other_actor = other_actor
        self._actor = actor
        self._time = time

    def update(self):
        """
        SUCCESS once the estimated time to reach the other actor drops
        below the configured time, RUNNING otherwise.
        """
        new_status = py_trees.common.Status.RUNNING

        current_location = CarlaDataProvider.get_location(self._actor)
        target_location = CarlaDataProvider.get_location(self._other_actor)
        if current_location is None or target_location is None:
            return new_status

        distance = calculate_distance(current_location, target_location)
        current_velocity = CarlaDataProvider.get_velocity(self._actor)
        other_velocity = CarlaDataProvider.get_velocity(self._other_actor)

        # if the actor is not closing in, simply use a large time to arrival
        time_to_arrival = self._max_time_to_arrival
        if current_velocity > other_velocity:
            # NOTE(review): the factor 2 looks like a deliberate margin on the
            # constant-closing-speed estimate -- confirm before changing
            time_to_arrival = 2 * distance / (current_velocity - other_velocity)

        if time_to_arrival < self._time:
            new_status = py_trees.common.Status.SUCCESS

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status
class AccelerateToVelocity(AtomicBehavior):
    """
    This class contains an atomic acceleration behavior. The controlled
    traffic participant will accelerate with _throttle_value_ until reaching
    a given _target_velocity_

    Important parameters:
    - actor: vehicle under control
    - throttle_value: throttle applied while below the target velocity
    - target_velocity: velocity threshold at which the behavior succeeds
    """

    def __init__(self, actor, throttle_value, target_velocity, name="Acceleration"):
        """
        Setup parameters including acceleration value (via throttle_value)
        and target velocity
        """
        super(AccelerateToVelocity, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._control = carla.VehicleControl()
        self._actor = actor
        self._throttle_value = throttle_value
        self._target_velocity = target_velocity

        # Bugfix: carla.VehicleControl's steering field is named 'steer'
        # (as used by SteerVehicle in this module); the previous assignment
        # to a non-existing 'steering' attribute was a typo
        self._control.steer = 0

    def update(self):
        """
        Set throttle to throttle_value, as long as velocity is < target_velocity
        """
        new_status = py_trees.common.Status.RUNNING

        if CarlaDataProvider.get_velocity(self._actor) < self._target_velocity:
            self._control.throttle = self._throttle_value
        else:
            new_status = py_trees.common.Status.SUCCESS
            self._control.throttle = 0

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        self._actor.apply_control(self._control)

        return new_status
class KeepVelocity(AtomicBehavior):
    """
    This class contains an atomic behavior to keep the provided velocity.
    The controlled traffic participant will accelerate as fast as possible
    until reaching a given _target_velocity_, which is then maintained for
    as long as this behavior is active.

    Note: In parallel to this behavior a termination behavior has to be used
          to keep the velocity either for a certain duration, or for a certain
          distance, etc.
    """

    def __init__(self, actor, target_velocity, name="KeepVelocity"):
        """
        Setup parameters including acceleration value (via throttle_value)
        and target velocity
        """
        super(KeepVelocity, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._control = carla.VehicleControl()
        self._actor = actor
        self._target_velocity = target_velocity

        # Bugfix: carla.VehicleControl's steering field is named 'steer'
        # (as used by SteerVehicle in this module); the previous assignment
        # to a non-existing 'steering' attribute was a typo
        self._control.steer = 0

    def update(self):
        """
        Apply full throttle while velocity < target_velocity, otherwise coast.
        The behavior never succeeds on its own; it stays RUNNING until
        terminated from outside.
        """
        new_status = py_trees.common.Status.RUNNING

        if CarlaDataProvider.get_velocity(self._actor) < self._target_velocity:
            self._control.throttle = 1.0
        else:
            self._control.throttle = 0.0

        self._actor.apply_control(self._control)
        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status

    def terminate(self, new_status):
        """
        On termination of this behavior, the throttle should be set back to 0.,
        to avoid further acceleration.
        """
        self._control.throttle = 0.0
        self._actor.apply_control(self._control)
        super(KeepVelocity, self).terminate(new_status)
class DriveDistance(AtomicBehavior):
    """
    This class contains an atomic behavior to drive a certain distance.
    """

    def __init__(self, actor, distance, name="DriveDistance"):
        """
        Setup parameters
        """
        super(DriveDistance, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._target_distance = distance
        self._distance = 0
        self._location = None
        self._actor = actor

    def initialise(self):
        self._location = CarlaDataProvider.get_location(self._actor)
        super(DriveDistance, self).initialise()

    def update(self):
        """
        Check driven distance
        """
        new_status = py_trees.common.Status.RUNNING

        new_location = CarlaDataProvider.get_location(self._actor)
        # Robustness fix: the data provider may return None (the trigger
        # conditions in this module guard against this as well); previously
        # calculate_distance() would have crashed on a None location.
        # Skip the accumulation until two valid samples are available.
        if new_location is not None:
            if self._location is not None:
                self._distance += calculate_distance(self._location, new_location)
            self._location = new_location

        if self._distance > self._target_distance:
            new_status = py_trees.common.Status.SUCCESS

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status
class UseAutoPilot(AtomicBehavior):
    """
    Atomic behavior that keeps the CARLA autopilot enabled while active.

    Note: In parallel to this behavior a termination behavior has to be used
          to terminate this behavior after a certain duration, or after a
          certain distance, etc.
    """

    def __init__(self, actor, name="UseAutoPilot"):
        """
        Setup parameters
        """
        super(UseAutoPilot, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._actor = actor

    def update(self):
        """
        (Re-)activate the autopilot on every tick; never succeeds on its own.
        """
        self._actor.set_autopilot(True)
        new_status = py_trees.common.Status.RUNNING
        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status

    def terminate(self, new_status):
        """
        Deactivate autopilot
        """
        self._actor.set_autopilot(False)
        super(UseAutoPilot, self).terminate(new_status)
class StopVehicle(AtomicBehavior):
    """
    This class contains an atomic stopping behavior. The controlled traffic
    participant will decelerate with _brake_value_ until reaching a full stop.
    """

    def __init__(self, actor, brake_value, name="Stopping"):
        """
        Setup _actor and maximum braking value
        """
        super(StopVehicle, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._control = carla.VehicleControl()
        self._actor = actor
        self._brake_value = brake_value

        # Bugfix: carla.VehicleControl's steering field is named 'steer'
        # (as used by SteerVehicle in this module); the previous assignment
        # to a non-existing 'steering' attribute was a typo
        self._control.steer = 0

    def update(self):
        """
        Set brake to brake_value until reaching full stop
        """
        new_status = py_trees.common.Status.RUNNING

        if CarlaDataProvider.get_velocity(self._actor) > EPSILON:
            self._control.brake = self._brake_value
        else:
            new_status = py_trees.common.Status.SUCCESS
            self._control.brake = 0

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        self._actor.apply_control(self._control)

        return new_status
class WaitForTrafficLightState(AtomicBehavior):
    """
    Atomic behavior that waits until a given traffic light reaches the
    desired state.
    """

    def __init__(self, traffic_light, state, name="WaitForTrafficLightState"):
        """
        Setup traffic_light
        """
        super(WaitForTrafficLightState, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._traffic_light = traffic_light
        self._traffic_light_state = state

    def update(self):
        """
        SUCCESS once the traffic light is in the desired state,
        RUNNING before that.
        """
        new_status = py_trees.common.Status.RUNNING

        # the next line may throw, if self._traffic_light is not a traffic
        # light, but another actor. This is intended.
        if str(self._traffic_light.state) == self._traffic_light_state:
            new_status = py_trees.common.Status.SUCCESS

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status

    def terminate(self, new_status):
        """
        Drop the reference to the traffic light on termination.
        """
        self._traffic_light = None
        super(WaitForTrafficLightState, self).terminate(new_status)
class SyncArrival(AtomicBehavior):
    """
    This class contains an atomic behavior to
    set velocity of actor so that it reaches location at the same time as
    actor_reference. The behaviour assumes that the two actors are moving
    towards location in a straight line.

    Note: In parallel to this behavior a termination behavior has to be used
          to keep the synchronisation going for a certain duration, or for a
          certain distance, etc.
    """

    def __init__(self, actor, actor_reference, target_location, gain=1, name="SyncArrival"):
        """
        actor           : actor to be controlled
        actor_reference : reference actor with which arrival has to be
                          synchronised
        gain            : coefficient for actor's throttle and brake
                          controls
        """
        super(SyncArrival, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._control = carla.VehicleControl()
        self._actor = actor
        self._actor_reference = actor_reference
        self._target_location = target_location
        self._gain = gain

        # Bugfix: carla.VehicleControl's steering field is named 'steer'
        # (as used by SteerVehicle in this module); the previous assignment
        # to a non-existing 'steering' attribute was a typo
        self._control.steer = 0

    def update(self):
        """
        Dynamic control update for actor velocity
        """
        new_status = py_trees.common.Status.RUNNING

        distance_reference = calculate_distance(CarlaDataProvider.get_location(self._actor_reference),
                                                self._target_location)
        distance = calculate_distance(CarlaDataProvider.get_location(self._actor),
                                      self._target_location)

        # Estimated times to arrival; infinity while an actor is not moving
        velocity_reference = CarlaDataProvider.get_velocity(self._actor_reference)
        time_reference = float('inf')
        if velocity_reference > 0:
            time_reference = distance_reference / velocity_reference

        velocity_current = CarlaDataProvider.get_velocity(self._actor)
        time_current = float('inf')
        if velocity_current > 0:
            time_current = distance / velocity_current

        # Positive control value: the actor would arrive later -> throttle;
        # negative: the actor would arrive earlier -> brake
        control_value = (self._gain) * (time_current - time_reference)

        if control_value > 0:
            self._control.throttle = min([control_value, 1])
            self._control.brake = 0
        else:
            self._control.throttle = 0
            self._control.brake = min([abs(control_value), 1])

        self._actor.apply_control(self._control)
        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        return new_status

    def terminate(self, new_status):
        """
        On termination of this behavior, the throttle should be set back to 0.,
        to avoid further acceleration.
        """
        self._control.throttle = 0.0
        self._control.brake = 0.0
        self._actor.apply_control(self._control)
        super(SyncArrival, self).terminate(new_status)
class SteerVehicle(AtomicBehavior):
    """
    Atomic behavior that applies a fixed steer value to the actor.
    """

    def __init__(self, actor, steer_value, name="Steering"):
        """
        Setup actor and maximum steer value
        """
        super(SteerVehicle, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._control = carla.VehicleControl()
        self._actor = actor
        self._steer_value = steer_value

    def update(self):
        """
        Apply the configured steer value once and finish with SUCCESS.
        """
        new_status = py_trees.common.Status.SUCCESS

        # start from the actor's current control so only steer is changed
        self._control = self._actor.get_control()
        self._control.steer = self._steer_value

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        self._actor.apply_control(self._control)
        return new_status
class BasicAgentBehavior(AtomicBehavior):
    """
    This class contains an atomic behavior, which uses the
    basic_agent from CARLA to control the actor until
    reaching a target location.
    """

    # distance below which the target counts as reached
    _acceptable_target_distance = 2

    def __init__(self, actor, target_location, name="BasicAgentBehavior"):
        """
        Setup actor and target location; a BasicAgent plans the route from
        the actor's current position to the target.
        """
        super(BasicAgentBehavior, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))
        self._agent = BasicAgent(actor)
        self._agent.set_destination((target_location.x, target_location.y, target_location.z))
        self._control = carla.VehicleControl()
        self._actor = actor
        self._target_location = target_location

    def update(self):
        """
        Let the agent compute the next control and apply it; SUCCESS once
        the actor is within _acceptable_target_distance of the target.
        """
        new_status = py_trees.common.Status.RUNNING

        self._control = self._agent.run_step()

        location = CarlaDataProvider.get_location(self._actor)
        # Robustness fix: the data provider may return None (the trigger
        # conditions in this module guard against this as well); only test
        # for arrival when a location is available
        if location is not None and \
                calculate_distance(location, self._target_location) < self._acceptable_target_distance:
            new_status = py_trees.common.Status.SUCCESS

        self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
        self._actor.apply_control(self._control)

        return new_status

    def terminate(self, new_status):
        """
        Stop the vehicle (throttle and brake to 0) on termination.
        """
        self._control.throttle = 0.0
        self._control.brake = 0.0
        self._actor.apply_control(self._control)
        super(BasicAgentBehavior, self).terminate(new_status)
class Idle(AtomicBehavior):
    """
    Atomic behavior that does nothing and stays RUNNING forever.
    """

    def __init__(self, name="Idle"):
        """
        Setup behavior name
        """
        super(Idle, self).__init__(name)
        self.logger.debug("%s.__init__()" % (self.__class__.__name__))

    def update(self):
        """
        Always keep RUNNING.
        """
        return py_trees.common.Status.RUNNING
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,379 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenariomanager/scenario_manager.py | #!/usr/bin/env python
# Copyright (c) 2018 Intel Labs.
# authors: Fabian Oboril (fabian.oboril@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides the Scenario and ScenarioManager implementations.
These must not be modified and are for reference only!
"""
from __future__ import print_function
import sys
import time
import threading
import py_trees
import srunner
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.result_writer import ResultOutputProvider
from srunner.scenariomanager.timer import GameTime, TimeOut
from srunner.scenariomanager.traffic_events import TrafficEvent, TrafficEventType
class Scenario(object):
    """
    Basic scenario class. This class holds the behavior_tree describing the
    scenario and the test criteria.
    The user must not modify this class.

    Important parameters:
    - behavior: User defined scenario with py_tree
    - criteria_list: List of user defined test criteria with py_tree
    - timeout (default = 60s): Timeout of the scenario in seconds
    - terminate_on_failure: Terminate scenario on first failure
    """

    def __init__(self, behavior, criteria, name, timeout=60, terminate_on_failure=False):
        self.behavior = behavior
        self.test_criteria = criteria
        self.timeout = timeout

        if not isinstance(self.test_criteria, py_trees.composites.Parallel):
            # list of nodes
            for criterion in self.test_criteria:
                criterion.terminate_on_failure = terminate_on_failure

            # Create py_tree for test criteria
            self.criteria_tree = py_trees.composites.Parallel(name="Test Criteria")
            self.criteria_tree.add_children(self.test_criteria)
            self.criteria_tree.setup(timeout=1)
        else:
            self.criteria_tree = criteria

        # Create node for timeout
        self.timeout_node = TimeOut(self.timeout, name="TimeOut")

        # Create overall py_tree; the parallel succeeds as soon as any child
        # (behavior, timeout, or criteria) finishes
        self.scenario_tree = py_trees.composites.Parallel(name, policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        self.scenario_tree.add_child(self.behavior)
        self.scenario_tree.add_child(self.timeout_node)
        self.scenario_tree.add_child(self.criteria_tree)
        self.scenario_tree.setup(timeout=1)

    def terminate(self):
        """
        This function sets the status of all leaves in the scenario tree to INVALID
        """
        # Collect all leaf nodes with an explicit stack-based traversal.
        # Bugfix: the previous implementation removed entries from the list
        # it was iterating over (mutate-while-iterating), relying on an
        # outer retry loop to eventually converge; this traversal visits
        # every node exactly once.
        leaves = []
        stack = [self.scenario_tree]
        while stack:
            node = stack.pop()
            if node.children:
                stack.extend(node.children)
            else:
                leaves.append(node)

        # Set status to INVALID
        for node in leaves:
            node.terminate(py_trees.common.Status.INVALID)
class ScenarioManager(object):
    """
    Basic scenario manager class. This class holds all functionality
    required to start, and analyze a scenario.
    The user must not modify this class.

    To use the ScenarioManager:
    1. Create an object via manager = ScenarioManager()
    2. Load a scenario via manager.load_scenario()
    3. Trigger the execution of the scenario manager.execute()
       This function is designed to explicitly control start and end of
       the scenario execution
    4. Trigger a result evaluation with manager.analyze()
    5. Cleanup with manager.stop_scenario()
    """

    # Handles to the currently loaded scenario; filled in by load_scenario()
    scenario = None
    scenario_tree = None
    ego_vehicle = None
    other_actors = None

    def __init__(self, world, debug_mode=False):
        """
        Init requires scenario as input
        """
        self._debug_mode = debug_mode
        self.agent = None
        self._autonomous_agent_plugged = False
        self._running = False
        self._timestamp_last_run = 0.0
        # Protects _tick_scenario against being executed multiple times in
        # parallel from the CARLA on_tick callback
        self._my_lock = threading.Lock()
        self.scenario_duration_system = 0.0
        self.scenario_duration_game = 0.0
        self.start_system_time = None
        self.end_system_time = None
        # Register the scenario tick as callback for the CARLA world tick
        world.on_tick(self._tick_scenario)

    def load_scenario(self, scenario):
        """
        Load a new scenario
        """
        self.restart()
        self.scenario = scenario.scenario
        self.scenario_tree = self.scenario.scenario_tree
        self.ego_vehicle = scenario.ego_vehicle
        self.other_actors = scenario.other_actors
        # Make the actors known to the data provider so cached location and
        # velocity queries work during the scenario
        CarlaDataProvider.register_actor(self.ego_vehicle)
        CarlaDataProvider.register_actors(self.other_actors)
        # To print the scenario tree uncomment the next line
        # py_trees.display.render_dot_tree(self.scenario_tree)

    def restart(self):
        """
        Reset all parameters
        """
        self._running = False
        self._timestamp_last_run = 0.0
        self.scenario_duration_system = 0.0
        self.scenario_duration_game = 0.0
        self.start_system_time = None
        self.end_system_time = None
        GameTime.restart()

    def run_scenario(self, agent=None):
        """
        Trigger the start of the scenario and wait for it to finish/fail
        """
        self.agent = agent
        print("ScenarioManager: Running scenario {}".format(self.scenario_tree.name))
        self.start_system_time = time.time()
        start_game_time = GameTime.get_time()
        self._running = True
        # The scenario itself is driven by _tick_scenario() (registered as
        # world.on_tick callback in __init__); this loop only waits until
        # the callback clears _running
        while self._running:
            time.sleep(0.5)
        self.end_system_time = time.time()
        end_game_time = GameTime.get_time()
        self.scenario_duration_system = self.end_system_time - \
            self.start_system_time
        self.scenario_duration_game = end_game_time - start_game_time
        if self.scenario_tree.status == py_trees.common.Status.FAILURE:
            print("ScenarioManager: Terminated due to failure")

    def _tick_scenario(self, timestamp):
        """
        Run next tick of scenario
        This function is a callback for world.on_tick()

        Important:
        - It hast to be ensured that the scenario has not yet completed/failed
          and that the time moved forward.
        - A thread lock should be used to avoid that the scenario tick is performed
          multiple times in parallel.
        """
        with self._my_lock:
            # Only tick while running and when simulation time actually advanced
            if self._running and self._timestamp_last_run < timestamp.elapsed_seconds:
                self._timestamp_last_run = timestamp.elapsed_seconds
                if self._debug_mode:
                    print("\n--------- Tick ---------\n")
                # Update game time and actor information
                GameTime.on_carla_tick(timestamp)
                CarlaDataProvider.on_carla_tick()
                # Tick scenario
                self.scenario_tree.tick_once()
                if self.agent:
                    # Invoke agent
                    action = self.agent()
                    self.ego_vehicle.apply_control(action)
                if self._debug_mode:
                    print("\n")
                    py_trees.display.print_ascii_tree(
                        self.scenario_tree, show_status=True)
                    sys.stdout.flush()
                # The scenario is over as soon as the root node left RUNNING;
                # run_scenario() polls this flag
                if self.scenario_tree.status != py_trees.common.Status.RUNNING:
                    self._running = False

    def stop_scenario(self):
        """
        This function triggers a proper termination of a scenario
        """
        if self.scenario is not None:
            self.scenario.terminate()
        CarlaDataProvider.cleanup()

    def analyze_scenario(self, stdout, filename, junit):
        """
        This function is intended to be called from outside and provide
        statistics about the scenario (human-readable, in form of a junit
        report, etc.)
        """
        failure = False
        timeout = False
        result = "SUCCESS"
        if isinstance(self.scenario.test_criteria, py_trees.composites.Parallel):
            # Criteria were handed over as a ready-made parallel composite
            if self.scenario.test_criteria.status == py_trees.common.Status.FAILURE:
                failure = True
                result = "FAILURE"
        else:
            # Criteria were handed over as a plain list of criterion nodes
            for criterion in self.scenario.test_criteria:
                if (not criterion.optional and
                        criterion.test_status != "SUCCESS" and
                        criterion.test_status != "ACCEPTABLE"):
                    failure = True
                    result = "FAILURE"
                elif criterion.test_status == "ACCEPTABLE":
                    result = "ACCEPTABLE"
        # A timeout only counts as such when no criterion failed
        if self.scenario.timeout_node.timeout and not failure:
            timeout = True
            result = "TIMEOUT"
        output = ResultOutputProvider(self, result, stdout, filename, junit)
        output.write()
        return failure or timeout

    def analyze_scenario_challenge(self):
        """
        This function is intended to be called from outside and provide
        statistics about the scenario (human-readable, for the CARLA challenge.)
        """
        # Score penalties per infraction type
        PENALTY_COLLISION_STATIC = 10
        PENALTY_COLLISION_VEHICLE = 10
        PENALTY_COLLISION_PEDESTRIAN = 30
        PENALTY_TRAFFIC_LIGHT = 10
        PENALTY_WRONG_WAY = 5
        target_reached = False
        failure = False
        result = "SUCCESS"
        final_score = 0.0
        score_penalty = 0.0
        score_route = 0.0
        return_message = ""
        if isinstance(self.scenario.test_criteria, py_trees.composites.Parallel):
            if self.scenario.test_criteria.status == py_trees.common.Status.FAILURE:
                failure = True
                result = "FAILURE"
        if self.scenario.timeout_node.timeout and not failure:
            result = "TIMEOUT"
        # Collect the traffic events reported by all criterion nodes
        list_traffic_events = []
        for node in self.scenario.test_criteria.children:
            if node.list_traffic_events:
                list_traffic_events.extend(node.list_traffic_events)
        list_collisions = []
        list_red_lights = []
        list_wrong_way = []
        list_route_dev = []
        # analyze all traffic events
        for event in list_traffic_events:
            if event.get_type() == TrafficEventType.COLLISION_STATIC:
                score_penalty += PENALTY_COLLISION_STATIC
                msg = event.get_message()
                if msg:
                    list_collisions.append(event.get_message())
            elif event.get_type() == TrafficEventType.COLLISION_VEHICLE:
                score_penalty += PENALTY_COLLISION_VEHICLE
                msg = event.get_message()
                if msg:
                    list_collisions.append(event.get_message())
            elif event.get_type() == TrafficEventType.COLLISION_PEDESTRIAN:
                score_penalty += PENALTY_COLLISION_PEDESTRIAN
                msg = event.get_message()
                if msg:
                    list_collisions.append(event.get_message())
            elif event.get_type() == TrafficEventType.TRAFFIC_LIGHT_INFRACTION:
                score_penalty += PENALTY_TRAFFIC_LIGHT
                msg = event.get_message()
                if msg:
                    list_red_lights.append(event.get_message())
            elif event.get_type() == TrafficEventType.WRONG_WAY_INFRACTION:
                score_penalty += PENALTY_WRONG_WAY
                msg = event.get_message()
                if msg:
                    list_wrong_way.append(event.get_message())
            elif event.get_type() == TrafficEventType.ROUTE_DEVIATION:
                msg = event.get_message()
                if msg:
                    list_route_dev.append(event.get_message())
            elif event.get_type() == TrafficEventType.ROUTE_COMPLETED:
                score_route = 100.0
                target_reached = True
            elif event.get_type() == TrafficEventType.ROUTE_COMPLETION:
                # Partial completion only counts when the target was not reached
                if not target_reached:
                    score_route = event.get_dict()['route_completed']
        # Final score is the route score minus the penalties, floored at 0
        final_score = max(score_route - score_penalty, 0)
        return_message += "\n=================================="
        return_message += "\n==[{}] [Score = {:.2f} : (route_score={}, infractions=-{})]".format(result,
                                                                                                 final_score,
                                                                                                 score_route,
                                                                                                 score_penalty)
        if list_collisions:
            return_message += "\n===== Collisions:"
            for item in list_collisions:
                return_message += "\n========== {}".format(item)
        if list_red_lights:
            return_message += "\n===== Red lights:"
            for item in list_red_lights:
                return_message += "\n========== {}".format(item)
        if list_wrong_way:
            return_message += "\n===== Wrong way:"
            for item in list_wrong_way:
                return_message += "\n========== {}".format(item)
        if list_route_dev:
            return_message += "\n===== Route deviation:"
            for item in list_route_dev:
                return_message += "\n========== {}".format(item)
        return_message += "\n=================================="
        return result, final_score, return_message
67,380 | chauvinSimon/scenario_runner | refs/heads/master | /scenario_runner.py | #!/usr/bin/env python
# Copyright (c) 2018-2019 Intel Labs.
# authors: Fabian Oboril (fabian.oboril@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Welcome to CARLA scenario_runner
This is the main script to be executed when running a scenario.
It loads the scenario configuration, loads the scenario and manager,
and finally triggers the scenario execution.
"""
from __future__ import print_function
import argparse
from argparse import RawTextHelpFormatter
from datetime import datetime
import traceback
import sys
import carla
from srunner.scenarios.follow_leading_vehicle import *
from srunner.scenarios.opposite_vehicle_taking_priority import *
from srunner.scenarios.object_crash_vehicle import *
from srunner.scenarios.no_signal_junction_crossing import *
from srunner.scenarios.object_crash_intersection import *
from srunner.scenarios.control_loss import *
from srunner.scenarios.config_parser import *
from srunner.scenariomanager.scenario_manager import ScenarioManager
# Version of scenario_runner
VERSION = 0.2
# Dictionary of all supported scenarios.
# key = Name of config file in Configs/
# value = List as defined in the scenario module
SCENARIOS = {
"FollowLeadingVehicle": FOLLOW_LEADING_VEHICLE_SCENARIOS,
"ObjectCrossing": OBJECT_CROSSING_SCENARIOS,
"RunningRedLight": RUNNING_RED_LIGHT_SCENARIOS,
"NoSignalJunction": NO_SIGNAL_JUNCTION_SCENARIOS,
"VehicleTurning": VEHICLE_TURNING_SCENARIOS,
"ControlLoss": CONTROL_LOSS_SCENARIOS
}
class ScenarioRunner(object):

    """
    This is the core scenario runner module. It is responsible for
    running (and repeating) a single scenario or a list of scenarios.

    Usage:
    scenario_runner = ScenarioRunner(args)
    scenario_runner.run(args)
    del scenario_runner
    """

    # Class-level fallbacks, so that __del__()/cleanup() stay safe even when
    # __init__() raised before the per-instance attributes were assigned.
    ego_vehicle = None
    actors = []

    # Tunable parameters
    client_timeout = 10.0  # in seconds
    wait_for_world = 10.0  # in seconds

    # CARLA world and scenario handlers
    world = None
    manager = None

    def __init__(self, args):
        """
        Setup CARLA client and world
        Setup ScenarioManager

        args -- parsed command line arguments (host, port, debug, ...)
        """
        # Bugfix: rebind mutable state per instance. Appending to the
        # class-level 'actors' list (via prepare_actors) would otherwise
        # leak actor references between ScenarioRunner instances.
        self.ego_vehicle = None
        self.actors = []

        # First of all, we need to create the client that will send the requests
        # to the simulator. Here we'll assume the simulator is accepting
        # requests in the localhost at port 2000.
        client = carla.Client(args.host, int(args.port))
        client.set_timeout(self.client_timeout)

        # Once we have a client we can retrieve the world that is currently
        # running.
        self.world = client.get_world()

        # Wait for the world to be ready
        self.world.wait_for_tick(self.wait_for_world)

        # Create scenario manager
        self.manager = ScenarioManager(self.world, args.debug)

    def __del__(self):
        """
        Cleanup and delete actors, ScenarioManager and CARLA world
        """
        self.cleanup(True)
        if self.manager is not None:
            del self.manager
        if self.world is not None:
            del self.world

    @staticmethod
    def get_scenario_class_or_fail(scenario):
        """
        Get scenario class by scenario name.

        The scenario name must be listed in SCENARIOS and the corresponding
        class must be importable at module level (globals()).
        If scenario is not supported or not found, exit script.
        """
        for scenarios in SCENARIOS.values():
            if scenario in scenarios:
                if scenario in globals():
                    return globals()[scenario]

        print("Scenario '{}' not supported ... Exiting".format(scenario))
        sys.exit(-1)

    def cleanup(self, ego=False):
        """
        Remove and destroy all actors.

        ego -- if True, the ego vehicle is destroyed as well
        """
        # We need enumerate here, otherwise the actors are not properly removed
        for i, _ in enumerate(self.actors):
            if self.actors[i] is not None:
                self.actors[i].destroy()
                self.actors[i] = None
        self.actors = []

        if ego and self.ego_vehicle is not None:
            self.ego_vehicle.destroy()
            self.ego_vehicle = None

    def setup_vehicle(self, model, spawn_point, hero=False):
        """
        Function to setup the most relevant vehicle parameters,
        incl. spawn point and vehicle model.

        Raises an Exception when the vehicle cannot be spawned
        (e.g. the spawn point is already occupied).
        """
        blueprint_library = self.world.get_blueprint_library()

        # Get vehicle by model
        blueprint = random.choice(blueprint_library.filter(model))
        if hero:
            blueprint.set_attribute('role_name', 'hero')
        else:
            blueprint.set_attribute('role_name', 'scenario')

        vehicle = self.world.try_spawn_actor(blueprint, spawn_point)
        if vehicle is None:
            raise Exception(
                "Error: Unable to spawn vehicle {} at {}".format(model, spawn_point))
        else:
            # Let's deactivate the autopilot of the vehicle
            vehicle.set_autopilot(False)

        return vehicle

    def prepare_actors(self, config):
        """
        Spawn or update all scenario actors according to
        their parameters provided in config
        """
        # If ego_vehicle already exists, just update location
        # Otherwise spawn ego vehicle
        if self.ego_vehicle is None:
            self.ego_vehicle = self.setup_vehicle(config.ego_vehicle.model, config.ego_vehicle.transform, True)
        else:
            self.ego_vehicle.set_transform(config.ego_vehicle.transform)

        # spawn all other actors
        for actor in config.other_actors:
            new_actor = self.setup_vehicle(actor.model, actor.transform)
            self.actors.append(new_actor)

    def analyze_scenario(self, args, config):
        """
        Provide feedback about success/failure of a scenario
        """
        # Output file names are derived from the scenario name + timestamp
        current_time = str(datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
        junit_filename = None
        if args.junit:
            junit_filename = config.name + current_time + ".xml"
        filename = None
        if args.file:
            filename = config.name + current_time + ".txt"

        # analyze_scenario returns a failure indication (falsy on success)
        if not self.manager.analyze_scenario(args.output, filename, junit_filename):
            print("Success!")
        else:
            print("Failure!")

    def run(self, args):
        """
        Run all scenarios according to provided commandline args
        """
        # Setup and run the scenarios for repetition times
        for _ in range(int(args.repetitions)):

            # Load the scenario configurations provided in the config file
            scenario_configurations = None
            if args.scenario.startswith("group:"):
                scenario_configurations = parse_scenario_configuration(args.scenario, args.scenario)
            else:
                scenario_config_file = find_scenario_config(args.scenario)
                if scenario_config_file is None:
                    print("Configuration for scenario {} cannot be found!".format(args.scenario))
                    continue
                scenario_configurations = parse_scenario_configuration(scenario_config_file, args.scenario)

            # Execute each configuration
            for config in scenario_configurations:

                # Prepare scenario
                print("Preparing scenario: " + config.name)
                scenario_class = ScenarioRunner.get_scenario_class_or_fail(config.type)
                try:
                    self.prepare_actors(config)
                    scenario = scenario_class(self.world,
                                              self.ego_vehicle,
                                              self.actors,
                                              config.town,
                                              args.randomize,
                                              args.debug)
                except Exception as exception:
                    # A failed setup aborts only this configuration, not the run
                    print("The scenario cannot be loaded")
                    traceback.print_exc()
                    print(exception)
                    self.cleanup()
                    continue

                # Load scenario and run it
                self.manager.load_scenario(scenario)
                self.manager.run_scenario()

                # Provide outputs if required
                self.analyze_scenario(args, config)

                # Stop scenario and cleanup
                self.manager.stop_scenario()
                del scenario
                self.cleanup()

        print("No more scenarios .... Exiting")
if __name__ == '__main__':

    DESCRIPTION = ("CARLA Scenario Runner: Setup, Run and Evaluate scenarios using CARLA\n"
                   "Current version: " + str(VERSION))

    PARSER = argparse.ArgumentParser(description=DESCRIPTION,
                                     formatter_class=RawTextHelpFormatter)
    PARSER.add_argument('--host', default='127.0.0.1',
                        help='IP of the host server (default: localhost)')
    PARSER.add_argument('--port', default='2000',
                        help='TCP port to listen to (default: 2000)')
    PARSER.add_argument('--debug', action="store_true", help='Run with debug output')
    PARSER.add_argument('--output', action="store_true", help='Provide results on stdout')
    PARSER.add_argument('--file', action="store_true", help='Write results into a txt file')
    PARSER.add_argument('--junit', action="store_true", help='Write results into a junit file')
    # pylint: disable=line-too-long
    PARSER.add_argument(
        '--scenario', help='Name of the scenario to be executed. Use the preposition \'group:\' to run all scenarios of one class, e.g. ControlLoss or FollowLeadingVehicle')
    # pylint: enable=line-too-long
    PARSER.add_argument('--randomize', action="store_true", help='Scenario parameters are randomized')
    PARSER.add_argument('--repetitions', default=1, help='Number of scenario executions')
    PARSER.add_argument('--list', action="store_true", help='List all supported scenarios and exit')
    PARSER.add_argument('--list_class', action="store_true", help='List all supported scenario classes and exit')
    PARSER.add_argument('-v', '--version', action='version', version='%(prog)s ' + str(VERSION))
    ARGUMENTS = PARSER.parse_args()

    # The informational flags print and exit without touching the simulator
    if ARGUMENTS.list:
        print("Currently the following scenarios are supported:")
        print(*get_list_of_scenarios(), sep='\n')
        sys.exit(0)

    if ARGUMENTS.list_class:
        print("Currently the following scenario classes are supported:")
        print(*SCENARIOS.keys(), sep='\n')
        sys.exit(0)

    if ARGUMENTS.scenario is None:
        print("Please specify a scenario using '--scenario SCENARIONAME'\n\n")
        PARSER.print_help(sys.stdout)
        sys.exit(0)

    # Bugfix: pre-bind SCENARIORUNNER so the 'finally' clause cannot raise a
    # NameError when the ScenarioRunner constructor itself fails (e.g. no
    # CARLA server reachable).
    SCENARIORUNNER = None
    try:
        SCENARIORUNNER = ScenarioRunner(ARGUMENTS)
        SCENARIORUNNER.run(ARGUMENTS)
    finally:
        if SCENARIORUNNER is not None:
            del SCENARIORUNNER
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,381 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenarios/basic_scenario.py | #!/usr/bin/env python
# Copyright (c) 2018-2019 Intel Labs.
# authors: Fabian Oboril (fabian.oboril@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides the basic class for all user-defined scenarios.
"""
from __future__ import print_function
import py_trees
from srunner.scenariomanager.scenario_manager import Scenario
def get_location_in_distance(actor, distance):
    """
    Obtain a location in a given distance from the current actor's location.
    Note: Search is stopped on first intersection.
    @return obtained location and the traveled distance
    """
    current = actor.get_world().get_map().get_waypoint(actor.get_location())
    covered = 0

    # Advance roughly one meter per step, accumulating the actual distance
    # between consecutive waypoints, until the requested distance is covered
    # or an intersection is hit.
    while True:
        if current.is_intersection or covered >= distance:
            break
        successor = current.next(1.0)[-1]
        covered += successor.transform.location.distance(current.transform.location)
        current = successor

    return current.transform.location, covered
class BasicScenario(object):

    """
    Base class for user-defined scenario
    """

    _town = None         # Name of the map that is used
    category = None      # Scenario category, e.g. control_loss, follow_leading_vehicle, ...
    name = None          # Name of the scenario
    criteria_list = []   # List of evaluation criteria
    timeout = 60         # Timeout of scenario in seconds
    scenario = None

    ego_vehicle = None
    other_actors = []

    def __init__(self, name, ego_vehicle, other_actors, town, world, debug_mode=False, terminate_on_failure=False):
        """
        Store the scenario parameters, verify the currently loaded CARLA map
        and assemble the Scenario object from the user-defined behavior tree
        and evaluation criteria.
        """
        # The map check comes first: everything else is pointless when the
        # server runs the wrong town.
        self._town = town
        self._check_town(world)

        self.name = name
        self.ego_vehicle = ego_vehicle
        self.other_actors = other_actors
        self.terminate_on_failure = terminate_on_failure

        # Enable verbose behavior-tree logging on demand
        if debug_mode:
            py_trees.logging.level = py_trees.logging.Level.DEBUG

        # Build the scenario from the subclass-provided behavior and criteria
        behavior = self._create_behavior()
        criteria = self._create_test_criteria()
        self.scenario = Scenario(behavior, criteria, self.name, self.timeout, self.terminate_on_failure)

    def _create_behavior(self):
        """
        Pure virtual function to setup user-defined scenario behavior
        """
        raise NotImplementedError(
            "This function is re-implemented by all scenarios"
            "If this error becomes visible the class hierarchy is somehow broken")

    def _create_test_criteria(self):
        """
        Pure virtual function to setup user-defined evaluation criteria for the
        scenario
        """
        raise NotImplementedError(
            "This function is re-implemented by all scenarios"
            "If this error becomes visible the class hierarchy is somehow broken")

    def _check_town(self, world):
        # Guard clause: nothing to do when the loaded map matches
        if world.get_map().name == self._town:
            return
        print("The CARLA server uses the wrong map!")
        print("This scenario requires to use map {}".format(self._town))
        raise Exception("The CARLA server uses the wrong map!")
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,382 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/challenge/autoagents/HumanAgent.py | import cv2
import numpy as np
import time
from threading import Thread
try:
import pygame
from pygame.locals import K_DOWN
from pygame.locals import K_LEFT
from pygame.locals import K_RIGHT
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_d
from pygame.locals import K_s
from pygame.locals import K_w
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
import carla
from srunner.challenge.autoagents.autonomous_agent import AutonomousAgent
class HumanInterface():
    """
    Class to control a vehicle manually for debugging purposes.

    Runs a pygame window (intended to run in a background thread, see
    HumanAgent.setup) that reads the keyboard and writes steering/throttle/
    brake values into the parent agent's current_control.
    """
    def __init__(self, parent):
        """
        Initialize pygame and open the display window.

        parent -- the HumanAgent instance; this class reads
                  parent.agent_engaged / parent.sensor_interface and writes
                  parent.current_control.
        """
        self.quit = False
        self._parent = parent
        self.WIDTH = 800    # display window width in pixels
        self.HEIGHT = 600   # display window height in pixels
        self.THROTTLE_DELTA = 0.05   # throttle change per loop iteration
        self.STEERING_DELTA = 0.01   # steering change per loop iteration
        pygame.init()
        pygame.font.init()
        self._clock = pygame.time.Clock()
        self._display = pygame.display.set_mode((self.WIDTH, self.HEIGHT), pygame.HWSURFACE | pygame.DOUBLEBUF)
        pygame.display.set_caption("Human Agent")
    def run(self):
        """
        Main loop: poll the keyboard, update the parent's vehicle control,
        and render the four camera views until self.quit is set
        (by HumanAgent.destroy).
        """
        # Wait until the agent has started receiving sensor data
        while not self._parent.agent_engaged:
            time.sleep(0.5)
        throttle = 0
        steering = 0
        brake = 0
        while not self.quit:
            # Limit the loop to ~20 iterations per second
            self._clock.tick_busy_loop(20)
            # Process events
            keys = pygame.key.get_pressed()
            if keys[K_UP] or keys[K_w]:
                throttle += self.THROTTLE_DELTA
            elif keys[K_DOWN] or keys[K_s]:
                # Braking ramps up 4x faster than throttle and cancels throttle
                brake += 4*self.THROTTLE_DELTA
                throttle = 0.0
            else:
                # No longitudinal key pressed: slowly decay throttle, release brake
                throttle -= self.THROTTLE_DELTA / 5.0
                brake = 0.0
            if keys[K_LEFT] or keys[K_a]:
                steering -= self.STEERING_DELTA
            elif keys[K_RIGHT] or keys[K_d]:
                steering += self.STEERING_DELTA
            else:
                # Steering snaps back to center when no key is pressed
                steering = 0.0
            pygame.event.pump()
            # normalize values into the ranges accepted by VehicleControl
            steering = min(1.0, max(-1.0, steering))
            throttle = min(1.0, max(0.0, throttle))
            brake = min(1.0, max(0.0, brake))
            # Publish the control to the agent (read by HumanAgent.run_step)
            self._parent.current_control.steer = steering
            self._parent.current_control.throttle = throttle
            self._parent.current_control.brake = brake
            # Fetch the latest camera frames.
            # NOTE(review): each entry appears to be a (frame, data) tuple with
            # the image array at index 1 — confirm against sensor_interface.
            input_data = self._parent.sensor_interface.get_data()
            image_center = input_data['Center'][1]
            image_left = input_data['Left'][1]
            image_right = input_data['Right'][1]
            image_rear = input_data['Rear'][1]
            # Compose a 2x3 mosaic: left/center/right on top, rear view
            # centered below, flanked by zeroed (black) padding images
            top_row = np.hstack((image_left, image_center, image_right))
            bottom_row = np.hstack((0*image_rear, image_rear, 0*image_rear))
            comp_image = np.vstack((top_row, bottom_row))
            # resize image to the window size
            image_rescaled = cv2.resize(comp_image, dsize=(self.WIDTH, self.HEIGHT), interpolation=cv2.INTER_CUBIC)
            # display image (swapaxes: pygame surfaces are column-major)
            self._surface = pygame.surfarray.make_surface(image_rescaled.swapaxes(0, 1))
            if self._surface is not None:
                self._display.blit(self._surface, (0, 0))
            pygame.display.flip()
        pygame.quit()
class HumanAgent(AutonomousAgent):

    """
    Autonomous agent whose controls are produced by a human operator
    through a pygame window (see HumanInterface).
    """

    def setup(self, path_to_conf_file):
        """
        Initialize the shared control state and launch the human
        interface loop in a background thread.
        """
        self.agent_engaged = False

        # Shared control object, mutated by the HumanInterface thread
        control = carla.VehicleControl()
        control.steer = 0.0
        control.throttle = 1.0
        control.brake = 0.0
        control.hand_brake = False
        self.current_control = control

        self._hic = HumanInterface(self)
        self._thread = Thread(target=self._hic.run)
        self._thread.start()

    def sensors(self):
        """
        Define the sensor suite required by the agent

        :return: a list containing the required sensors in the following format:
        [
        ['sensor.camera.rgb', {'x':x_rel, 'y': y_rel, 'z': z_rel,
        'yaw': yaw, 'pitch': pitch, 'roll': roll,
        'width': width, 'height': height, 'fov': fov}, 'Sensor01'],
        ['sensor.camera.rgb', {'x':x_rel, 'y': y_rel, 'z': z_rel,
        'yaw': yaw, 'pitch': pitch, 'roll': roll,
        'width': width, 'height': height, 'fov': fov}, 'Sensor02'],
        ['sensor.lidar.ray_cast', {'x':x_rel, 'y': y_rel, 'z': z_rel,
        'yaw': yaw, 'pitch': pitch, 'roll': roll}, 'Sensor03']
        ]
        """
        # Forward-facing camera
        center_cam = {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0,
                      'pitch': 0.0, 'yaw': 0.0, 'width': 300, 'height': 200, 'fov': 100, 'id': 'Center'}
        # Cameras angled 45 degrees to each side
        left_cam = {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0,
                    'pitch': 0.0, 'yaw': -45.0, 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'}
        right_cam = {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0,
                     'pitch': 0.0, 'yaw': 45.0, 'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'}
        # Wide-angle rear view
        rear_cam = {'type': 'sensor.camera.rgb', 'x': -1.8, 'y': 0, 'z': 1.60, 'roll': 0.0,
                    'pitch': 0.0, 'yaw': 180.0, 'width': 300, 'height': 200, 'fov': 130, 'id': 'Rear'}
        gps = {'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'}

        return [center_cam, left_cam, right_cam, rear_cam, gps]

    def run_step(self, input_data):
        """
        Signal the interface thread that sensor data is flowing and return
        the control most recently set by the human operator.
        """
        self.agent_engaged = True
        return self.current_control

    def destroy(self):
        """
        Stop the pygame loop and wait for its thread to terminate.
        """
        self._hic.quit = True
        self._thread.join()
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,383 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenarios/control_loss.py | #!/usr/bin/env python
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Control Loss Vehicle scenario:
The scenario realizes that the vehicle loses control due to
bad road conditions, etc. and checks to see if the vehicle
regains control and corrects its course.
"""
import random
import py_trees
from srunner.scenariomanager.atomic_scenario_behavior import *
from srunner.scenariomanager.atomic_scenario_criteria import *
from srunner.scenariomanager.timer import TimeOut
from srunner.scenarios.basic_scenario import *
CONTROL_LOSS_SCENARIOS = [
"ControlLoss"
]
class ControlLoss(BasicScenario):

    """
    Implementation of "Control Loss Vehicle" (Traffic Scenario 01)
    """

    category = "ControlLoss"
    timeout = 60                         # Timeout of scenario in seconds

    # ego vehicle parameters
    _no_of_jitter_actions = 20           # Number of random steering commands
    _noise_mean = 0                      # Mean value of steering noise
    _noise_std = 0.02                    # Std. deviation of steering noise
    _dynamic_mean = 0.05                 # Offset pushing the noise away from zero
    _abort_distance_to_intersection = 20
    _start_distance = 20
    _end_distance = 80

    def __init__(self, world, ego_vehicle, other_actors, town, randomize=False, debug_mode=False, config=None):
        """
        Setup all relevant parameters and create scenario
        """
        super(ControlLoss, self).__init__("ControlLoss",
                                          ego_vehicle,
                                          other_actors,
                                          town,
                                          world,
                                          debug_mode)

    def _create_behavior(self):
        """
        Behavior tree: wait until the ego vehicle drove _start_distance
        meters, then apply a series of random steering "jitter" actions,
        and finally wait until it reaches a point _end_distance meters
        ahead. The overall 60 s timeout is enforced by the Scenario.
        """
        # Start trigger: ego vehicle reaches a point _start_distance ahead
        jitter_start, _ = get_location_in_distance(self.ego_vehicle, self._start_distance)
        start_condition = InTriggerDistanceToLocation(self.ego_vehicle, jitter_start, 2.0)

        # Jitter sequence: each entry steers by a random amount for 0.2 s
        jitter_sequence = py_trees.composites.Sequence("Jitter Sequence Behavior")
        jitter_timeout = TimeOut(timeout=0.2, name="Timeout for next jitter")
        for idx in range(self._no_of_jitter_actions):
            steer_value = random.gauss(self._noise_mean, self._noise_std)
            # Push the sampled noise away from zero by _dynamic_mean
            if steer_value > 0:
                steer_value += self._dynamic_mean
            elif steer_value < 0:
                steer_value -= self._dynamic_mean
            steer_action = SteerVehicle(self.ego_vehicle, steer_value, name='Steering ' + str(idx))
            timed_steer = py_trees.composites.Parallel("Jitter Actions with Timeouts",
                                                       policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL)
            timed_steer.add_child(steer_action)
            timed_steer.add_child(jitter_timeout)
            jitter_sequence.add_child(timed_steer)

        # Abort the jitter as soon as the ego vehicle approaches an intersection
        jitter_abort = InTriggerDistanceToNextIntersection(self.ego_vehicle, self._abort_distance_to_intersection)
        jitter = py_trees.composites.Parallel("Jitter",
                                              policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        jitter.add_child(jitter_sequence)
        jitter.add_child(jitter_abort)

        # End condition: ego vehicle reaches a point _end_distance from here
        end_location, _ = get_location_in_distance(self.ego_vehicle, self._end_distance)
        end_condition = InTriggerDistanceToLocation(self.ego_vehicle, end_location, 2.0)

        # Assemble the behavior tree
        root = py_trees.composites.Sequence("Sequence Behavior")
        root.add_child(start_condition)
        root.add_child(jitter)
        root.add_child(end_condition)
        return root

    def _create_test_criteria(self):
        """
        A list of all test criteria will be created that is later used
        in parallel behavior tree.
        """
        return [CollisionTest(self.ego_vehicle)]
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,384 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenarios/follow_leading_vehicle.py | #!/usr/bin/env python
# Copyright (c) 2018-2019 Intel Labs.
# authors: Fabian Oboril (fabian.oboril@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Follow leading vehicle scenario:
The scenario realizes a common driving behavior, in which the
user-controlled ego vehicle follows a leading car driving down
a given road. At some point the leading car has to slow down and
finally stop. The ego vehicle has to react accordingly to avoid
a collision. The scenario ends either via a timeout, or if the ego
vehicle stopped close enough to the leading vehicle
"""
import random
import py_trees
from srunner.scenariomanager.atomic_scenario_behavior import *
from srunner.scenariomanager.atomic_scenario_criteria import *
from srunner.scenariomanager.timer import TimeOut
from srunner.scenarios.basic_scenario import *
FOLLOW_LEADING_VEHICLE_SCENARIOS = [
"FollowLeadingVehicle",
"FollowLeadingVehicleWithObstacle"
]
class FollowLeadingVehicle(BasicScenario):

    """
    This class holds everything required for a simple "Follow a leading vehicle"
    scenario involving two vehicles.
    """

    category = "FollowLeadingVehicle"

    timeout = 120            # Timeout of scenario in seconds

    # ego vehicle parameters
    _ego_max_velocity_allowed = 20   # Maximum allowed velocity [m/s]
    _ego_avg_velocity_expected = 4   # Average expected velocity [m/s]
    _ego_other_distance_start = 4    # time to arrival that triggers scenario starts

    # other vehicle
    _other_actor_max_brake = 1.0                  # Maximum brake of other actor
    _other_actor_stop_in_front_intersection = 30  # Stop ~30m in front of intersection

    def __init__(self, world, ego_vehicle, other_actors, town, randomize=False, debug_mode=False, config=None):
        """
        Setup all relevant parameters and create scenario
        If randomize is True, the scenario parameters are randomized
        """
        # Bugfix: the randomization must happen *before* super().__init__(),
        # because the base class builds the behavior tree (which reads
        # _ego_other_distance_start) during initialization. Previously the
        # randomized value was assigned too late and had no effect.
        if randomize:
            self._ego_other_distance_start = random.randint(4, 8)

            # Example code how to randomize start location
            # distance = random.randint(20, 80)
            # new_location, _ = get_location_in_distance(self.ego_vehicle, distance)
            # waypoint = world.get_map().get_waypoint(new_location)
            # waypoint.transform.location.z += 39
            # self.other_actors[0].set_transform(waypoint.transform)

        super(FollowLeadingVehicle, self).__init__("FollowVehicle",
                                                   ego_vehicle,
                                                   other_actors,
                                                   town,
                                                   world,
                                                   debug_mode)

    def _create_behavior(self):
        """
        The scenario defined after is a "follow leading vehicle" scenario. After
        invoking this scenario, it will wait for the user controlled vehicle to
        enter the start region, then make the other actor to drive until reaching
        the next intersection. Finally, the user-controlled vehicle has to be close
        enough to the other actor to end the scenario.
        If this does not happen within 60 seconds, a timeout stops the scenario
        """

        # start condition: either the ego vehicle is within
        # _ego_other_distance_start seconds of the leading vehicle, or
        # already within 10 m of it
        startcondition = py_trees.composites.Parallel(
            "Waiting for start position",
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        startcondition.add_child(InTimeToArrivalToLocation(self.ego_vehicle,
                                                           self._ego_other_distance_start,
                                                           self.other_actors[0].get_location()))
        startcondition.add_child(InTriggerDistanceToVehicle(self.ego_vehicle,
                                                            self.other_actors[0],
                                                            10))

        # let the other actor drive until next intersection
        # @todo: We should add some feedback mechanism to respond to ego_vehicle behavior
        driving_to_next_intersection = py_trees.composites.Parallel(
            "Waiting for end position",
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        driving_to_next_intersection.add_child(UseAutoPilot(self.other_actors[0]))
        driving_to_next_intersection.add_child(InTriggerDistanceToNextIntersection(
            self.other_actors[0], self._other_actor_stop_in_front_intersection))

        # stop vehicle
        stop = StopVehicle(self.other_actors[0], self._other_actor_max_brake)

        # end condition: the ego vehicle must be close to the leading
        # vehicle AND standing still
        endcondition = py_trees.composites.Parallel("Waiting for end position",
                                                    policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL)
        endcondition_part1 = InTriggerDistanceToVehicle(self.other_actors[0],
                                                        self.ego_vehicle,
                                                        distance=20,
                                                        name="FinalDistance")
        endcondition_part2 = StandStill(self.ego_vehicle, name="StandStill")
        endcondition.add_child(endcondition_part1)
        endcondition.add_child(endcondition_part2)

        # Build behavior tree
        sequence = py_trees.composites.Sequence("Sequence Behavior")
        sequence.add_child(startcondition)
        sequence.add_child(driving_to_next_intersection)
        sequence.add_child(stop)
        sequence.add_child(endcondition)
        return sequence

    def _create_test_criteria(self):
        """
        A list of all test criteria will be created that is later used
        in parallel behavior tree.
        """
        criteria = []

        max_velocity_criterion = MaxVelocityTest(self.ego_vehicle,
                                                 self._ego_max_velocity_allowed,
                                                 optional=True)
        collision_criterion = CollisionTest(self.ego_vehicle)
        keep_lane_criterion = KeepLaneTest(self.ego_vehicle)
        avg_velocity_criterion = AverageVelocityTest(self.ego_vehicle, self._ego_avg_velocity_expected, optional=True)

        criteria.append(max_velocity_criterion)
        criteria.append(collision_criterion)
        criteria.append(keep_lane_criterion)
        criteria.append(avg_velocity_criterion)

        # Add the collision and lane checks for all vehicles as well
        for vehicle in self.other_actors:
            collision_criterion = CollisionTest(vehicle)
            keep_lane_criterion = KeepLaneTest(vehicle)
            criteria.append(collision_criterion)
            criteria.append(keep_lane_criterion)

        return criteria
class FollowLeadingVehicleWithObstacle(BasicScenario):
    """
    This class holds a scenario similar to FollowLeadingVehicle
    but there is a (hidden) obstacle in front of the leading vehicle.

    Actors: other_actors[0] is the leading vehicle, other_actors[1] is the
    obstacle (a cyclist, per the sub-tree names below).
    """
    category = "FollowLeadingVehicle"
    timeout = 120 # Timeout of scenario in seconds
    # ego vehicle parameters
    _ego_max_velocity_allowed = 20 # Maximum allowed velocity [m/s]
    _ego_avg_velocity_expected = 4 # Average expected velocity [m/s]
    # NOTE(review): despite the name, this value is passed to
    # InTimeToArrivalToLocation below, i.e. used as a time threshold — confirm.
    _ego_other_distance_start = 4 # time to arrival that triggers scenario starts
    # other vehicle
    _other_actor_max_brake = 1.0 # Maximum brake of other vehicle
    _other_actor_stop_in_front_intersection = 30 # Stop ~30m in front of intersection
    def __init__(self, world, ego_vehicle, other_actors, town, randomize=False, debug_mode=False):
        """
        Setup all relevant parameters and create scenario.
        """
        super(FollowLeadingVehicleWithObstacle, self).__init__("FollowLeadingVehicleWithObstacle",
                                                               ego_vehicle,
                                                               other_actors,
                                                               town,
                                                               world,
                                                               debug_mode)
        if randomize:
            # Randomize the start-trigger threshold between 2 and 8
            self._ego_other_distance_start = random.randint(2, 8)
    def _create_behavior(self):
        """
        The scenario defined after is a "follow leading vehicle" scenario. After
        invoking this scenario, it will wait for the user controlled vehicle to
        enter the start region, then make the other actor to drive until reaching
        the next intersection. Finally, the user-controlled vehicle has to be close
        enough to the other actor to end the scenario.
        If this does not happen within 60 seconds, a timeout stops the scenario.
        """
        # start condition: ego vehicle close (in time) to the leader's position
        startcondition = InTimeToArrivalToLocation(self.ego_vehicle,
                                                   self._ego_other_distance_start,
                                                   self.other_actors[0].get_location(),
                                                   name="Waiting for start position")
        # let the other actor drive until next intersection
        # @todo: We should add some feedback mechanism to respond to ego_vehicle behavior
        driving_to_next_intersection = py_trees.composites.Parallel(
            "Waiting for end position",
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        # Leader drives on autopilot while the obstacle sub-sequence runs;
        # SUCCESS_ON_ALL: both children must succeed.
        driving_considering_bike = py_trees.composites.Parallel(
            "Drive with AutoPilot",
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL)
        driving_considering_bike.add_child(UseAutoPilot(self.other_actors[0]))
        # Obstacle sub-sequence: wait until the leader is within 10m of the
        # obstacle, pause 5s, then have the obstacle move off and brake.
        obstacle_sequence = py_trees.composites.Sequence("Obstacle sequence behavior")
        obstacle_sequence.add_child(InTriggerDistanceToVehicle(self.other_actors[0],
                                                               self.other_actors[1],
                                                               10))
        obstacle_sequence.add_child(TimeOut(5))
        # NOTE(review): "Obstalce" typo below is inside a runtime node name
        # (only visible in behavior-tree dumps) — intentionally left untouched.
        obstacle_clear_road = py_trees.composites.Parallel("Obstalce clearing road",
                                                           policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        obstacle_clear_road.add_child(DriveDistance(self.other_actors[1], 4))
        obstacle_clear_road.add_child(KeepVelocity(self.other_actors[1], 5))
        obstacle_sequence.add_child(obstacle_clear_road)
        obstacle_sequence.add_child(StopVehicle(self.other_actors[1], self._other_actor_max_brake))
        driving_considering_bike.add_child(obstacle_sequence)
        driving_to_next_intersection.add_child(InTriggerDistanceToNextIntersection(
            self.other_actors[0], self._other_actor_stop_in_front_intersection))
        driving_to_next_intersection.add_child(driving_considering_bike)
        # stop vehicle
        stop = StopVehicle(self.other_actors[0], self._other_actor_max_brake)
        # end condition: ego close to the leader AND ego standing still
        endcondition = py_trees.composites.Parallel("Waiting for end position",
                                                    policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL)
        endcondition_part1 = InTriggerDistanceToVehicle(self.other_actors[0],
                                                        self.ego_vehicle,
                                                        distance=20,
                                                        name="FinalDistance")
        endcondition_part2 = StandStill(self.ego_vehicle, name="FinalSpeed")
        endcondition.add_child(endcondition_part1)
        endcondition.add_child(endcondition_part2)
        # Build behavior tree
        sequence = py_trees.composites.Sequence("Sequence Behavior")
        sequence.add_child(startcondition)
        sequence.add_child(driving_to_next_intersection)
        sequence.add_child(stop)
        sequence.add_child(endcondition)
        return sequence
    def _create_test_criteria(self):
        """
        A list of all test criteria will be created that is later used
        in parallel behavior tree.
        """
        criteria = []
        # Ego criteria; the velocity checks are optional (informative only)
        max_velocity_criterion = MaxVelocityTest(self.ego_vehicle,
                                                 self._ego_max_velocity_allowed,
                                                 optional=True)
        collision_criterion = CollisionTest(self.ego_vehicle)
        keep_lane_criterion = KeepLaneTest(self.ego_vehicle)
        avg_velocity_criterion = AverageVelocityTest(self.ego_vehicle, self._ego_avg_velocity_expected, optional=True)
        criteria.append(max_velocity_criterion)
        criteria.append(collision_criterion)
        criteria.append(keep_lane_criterion)
        criteria.append(avg_velocity_criterion)
        # Add the collision and lane checks for all vehicles as well
        for vehicle in self.other_actors:
            collision_criterion = CollisionTest(vehicle)
            keep_lane_criterion = KeepLaneTest(vehicle)
            criteria.append(collision_criterion)
            criteria.append(keep_lane_criterion)
        return criteria
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,385 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenarios/object_crash_vehicle.py | #!/usr/bin/env python
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Object crash without prior vehicle action scenario:
The scenario realizes the user controlled ego vehicle
moving along the road and encountering a cyclist ahead.
"""
import py_trees
from srunner.scenariomanager.atomic_scenario_behavior import *
from srunner.scenariomanager.atomic_scenario_criteria import *
from srunner.scenariomanager.timer import TimeOut
from srunner.scenarios.basic_scenario import *
# Scenario class names exported by this module (looked up by the runner).
OBJECT_CROSSING_SCENARIOS = [
    "StationaryObjectCrossing",
    "DynamicObjectCrossing"
]
class StationaryObjectCrossing(BasicScenario):
    """
    Simple object-crash scenario without prior vehicle action: the ego
    vehicle passes through a road and encounters a stationary cyclist.
    """
    category = "ObjectCrossing"
    timeout = 60
    # ego vehicle parameters
    _ego_vehicle_velocity_allowed = 20
    _ego_vehicle_distance_to_other = 35

    def __init__(self, world, ego_vehicle, other_actors, town, randomize=False, debug_mode=False, config=None):
        """
        Set up all relevant parameters and create the scenario.
        """
        super(StationaryObjectCrossing, self).__init__("Stationaryobjectcrossing",
                                                       ego_vehicle,
                                                       other_actors,
                                                       town,
                                                       world,
                                                       debug_mode)

    def _create_behavior(self):
        """
        The only behavior is to wait until shortly before the timeout.
        """
        return TimeOut(self.timeout - 5)

    def _create_test_criteria(self):
        """
        Assemble the criteria that are later evaluated in parallel with
        the behavior tree.
        """
        return [
            MaxVelocityTest(
                self.ego_vehicle,
                self._ego_vehicle_velocity_allowed,
                optional=True),
            CollisionTest(self.ego_vehicle),
            KeepLaneTest(self.ego_vehicle, optional=True),
            DrivenDistanceTest(
                self.ego_vehicle,
                self._ego_vehicle_distance_to_other),
        ]
class DynamicObjectCrossing(BasicScenario):
    """
    This class holds everything required for a simple object crash
    without prior vehicle action involving a vehicle and a cyclist:
    the ego vehicle is passing through a road
    and encounters a cyclist crossing the road.
    """
    category = "ObjectCrossing"
    timeout = 60
    # ego vehicle parameters
    _ego_vehicle_velocity_allowed = 10
    _ego_vehicle_distance_driven = 50
    # other vehicle parameters
    # NOTE(review): _other_actor_max_throttle is not referenced anywhere in
    # this class — presumably leftover; verify before removing.
    _other_actor_target_velocity = 10
    _trigger_distance_from_ego = 35
    _other_actor_max_throttle = 1.0
    _other_actor_max_brake = 1.0
    def __init__(self, world, ego_vehicle, other_actors, town, randomize=False, debug_mode=False):
        """
        Setup all relevant parameters and create scenario.
        """
        super(DynamicObjectCrossing, self).__init__("Dynamicobjectcrossing",
                                                    ego_vehicle,
                                                    other_actors,
                                                    town,
                                                    world,
                                                    debug_mode)
    def _create_behavior(self):
        """
        After invoking this scenario, cyclist will wait for the user
        controlled vehicle to enter the in the trigger distance region,
        the cyclist starts crossing the road once the condition meets,
        then after 60 seconds, a timeout stops the scenario.
        """
        # leaf nodes
        # Start trigger: cyclist within _trigger_distance_from_ego of the ego
        trigger_dist = InTriggerDistanceToVehicle(
            self.other_actors[0],
            self.ego_vehicle,
            self._trigger_distance_from_ego)
        # First crossing leg: drive at target velocity until the region below
        start_other_actor = KeepVelocity(
            self.other_actors[0],
            self._other_actor_target_velocity)
        # NOTE(review): the trigger-region coordinates below are hard-coded
        # map positions (x 46..50, y 128..129.5 / 137..139) — town-specific.
        trigger_other = InTriggerRegion(
            self.other_actors[0],
            46, 50,
            128, 129.5)
        stop_other_actor = StopVehicle(
            self.other_actors[0],
            self._other_actor_max_brake)
        timeout_other = TimeOut(10)
        # Second crossing leg, same actor, farther region
        start_vehicle = KeepVelocity(
            self.other_actors[0],
            self._other_actor_target_velocity)
        trigger_other_actor = InTriggerRegion(
            self.other_actors[0],
            46, 50,
            137, 139)
        stop_vehicle = StopVehicle(
            self.other_actors[0],
            self._other_actor_max_brake)
        timeout_other_actor = TimeOut(3)
        # non leaf nodes
        root = py_trees.composites.Parallel(
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        scenario_sequence = py_trees.composites.Sequence()
        # Each "keep velocity" runs in parallel with its region trigger;
        # SUCCESS_ON_ONE stops the motion as soon as the region is reached.
        keep_velocity_other_parallel = py_trees.composites.Parallel(
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        keep_velocity_other = py_trees.composites.Parallel(
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        # building tree (child order defines execution order of the sequence)
        root.add_child(scenario_sequence)
        scenario_sequence.add_child(trigger_dist)
        scenario_sequence.add_child(keep_velocity_other_parallel)
        scenario_sequence.add_child(stop_other_actor)
        scenario_sequence.add_child(timeout_other)
        scenario_sequence.add_child(keep_velocity_other)
        scenario_sequence.add_child(stop_vehicle)
        scenario_sequence.add_child(timeout_other_actor)
        keep_velocity_other_parallel.add_child(start_other_actor)
        keep_velocity_other_parallel.add_child(trigger_other)
        keep_velocity_other.add_child(start_vehicle)
        keep_velocity_other.add_child(trigger_other_actor)
        return root
    def _create_test_criteria(self):
        """
        A list of all test criteria will be created that is later used
        in parallel behavior tree.
        """
        criteria = []
        # Velocity check is optional (informative only)
        max_velocity_criterion = MaxVelocityTest(
            self.ego_vehicle,
            self._ego_vehicle_velocity_allowed,
            optional=True)
        collision_criterion = CollisionTest(self.ego_vehicle)
        keep_lane_criterion = KeepLaneTest(self.ego_vehicle, optional=True)
        driven_distance_criterion = DrivenDistanceTest(
            self.ego_vehicle, self._ego_vehicle_distance_driven)
        criteria.append(max_velocity_criterion)
        criteria.append(collision_criterion)
        criteria.append(keep_lane_criterion)
        criteria.append(driven_distance_criterion)
        return criteria
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,386 | chauvinSimon/scenario_runner | refs/heads/master | /srunner/scenariomanager/carla_data_provider.py | #!/usr/bin/env python
# Copyright (c) 2018 Intel Labs.
# authors: Fabian Oboril (fabian.oboril@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides all frequently used data from CARLA via
local buffers to avoid blocking calls to CARLA
"""
import math
def calculate_velocity(actor):
    """
    Return the magnitude of *actor*'s velocity in the ground plane.

    Only the x and y components are used; the vertical (z) component is
    not included — presumably intentional for ground vehicles, confirm.

    :param actor: object exposing get_velocity() with .x/.y attributes
    :return: absolute planar speed as a float
    """
    # Fetch the velocity once instead of once per component: avoids a
    # second call into the simulator and a race between the two reads.
    velocity = actor.get_velocity()
    return math.sqrt(velocity.x**2 + velocity.y**2)
class CarlaDataProvider(object):

    """
    Buffered access to frequently used data of all registered actors.

    The buffers are refreshed once per CARLA tick (see on_carla_tick), so
    readers never issue blocking calls into the simulator.

    Currently available data:
    - Absolute velocity
    - Location

    Potential additions:
    - Acceleration
    - Transform
    """

    # Class-level buffers shared by all users; actor -> cached value.
    _actor_velocity_map = dict()
    _actor_location_map = dict()

    @staticmethod
    def register_actor(actor):
        """
        Add a new actor to the buffers.

        Raises KeyError if the actor is already registered.
        """
        if actor in CarlaDataProvider._actor_velocity_map:
            raise KeyError(
                "Vehicle '{}' already registered. Cannot register twice!".format(actor))
        else:
            CarlaDataProvider._actor_velocity_map[actor] = 0.0
        # NOTE(review): this message formats actor.id while the one above
        # formats the actor itself — harmless inconsistency, kept as-is.
        if actor in CarlaDataProvider._actor_location_map:
            raise KeyError(
                "Vehicle '{}' already registered. Cannot register twice!".format(actor.id))
        else:
            CarlaDataProvider._actor_location_map[actor] = None

    @staticmethod
    def register_actors(actors):
        """
        Add a new set of actors to the buffers.
        """
        for actor in actors:
            CarlaDataProvider.register_actor(actor)

    @staticmethod
    def on_carla_tick():
        """
        Callback from CARLA: refresh the cached velocity and location of
        every registered actor that is still alive.
        """
        for actor in CarlaDataProvider._actor_velocity_map:
            if actor is not None and actor.is_alive:
                CarlaDataProvider._actor_velocity_map[actor] = calculate_velocity(actor)
        for actor in CarlaDataProvider._actor_location_map:
            if actor is not None and actor.is_alive:
                CarlaDataProvider._actor_location_map[actor] = actor.get_location()

    @staticmethod
    def get_velocity(actor):
        """
        Return the buffered absolute velocity for the given actor,
        or 0.0 for unknown actors.
        """
        # Intentionally not raising for unknown actors: an exception here
        # could cause exception loops inside py_trees.
        if actor not in CarlaDataProvider._actor_velocity_map:
            return 0.0
        return CarlaDataProvider._actor_velocity_map[actor]

    @staticmethod
    def get_location(actor):
        """
        Return the buffered location for the given actor, or None for
        unknown actors.
        """
        # Intentionally not raising for unknown actors (see get_velocity).
        if actor not in CarlaDataProvider._actor_location_map:
            return None
        return CarlaDataProvider._actor_location_map[actor]

    @staticmethod
    def cleanup():
        """
        Remove all entries from all buffers.
        """
        CarlaDataProvider._actor_velocity_map.clear()
        CarlaDataProvider._actor_location_map.clear()
| {"/srunner/scenarios/object_crash_intersection.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/opposite_vehicle_taking_priority.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/challenge/challenge_evaluator.py": ["/srunner/challenge/envs/sensor_interface.py", "/srunner/scenarios/challenge_basic.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/challenge_basic.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenariomanager/atomic_scenario_behavior.py": ["/srunner/scenariomanager/carla_data_provider.py"], "/srunner/scenariomanager/scenario_manager.py": ["/srunner/scenariomanager/carla_data_provider.py", "/srunner/scenariomanager/traffic_events.py"], "/scenario_runner.py": ["/srunner/scenarios/follow_leading_vehicle.py", "/srunner/scenarios/opposite_vehicle_taking_priority.py", "/srunner/scenarios/object_crash_vehicle.py", "/srunner/scenarios/object_crash_intersection.py", "/srunner/scenarios/control_loss.py", "/srunner/scenarios/config_parser.py", "/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/basic_scenario.py": ["/srunner/scenariomanager/scenario_manager.py"], "/srunner/scenarios/control_loss.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/follow_leading_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"], "/srunner/scenarios/object_crash_vehicle.py": ["/srunner/scenariomanager/atomic_scenario_behavior.py", "/srunner/scenarios/basic_scenario.py"]} |
67,419 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/migrations/0005_alter_article_tags.py | # Generated by Django 3.2.7 on 2021-09-08 00:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Article.tags M2M field definition. Do not hand-edit."""
    dependencies = [
        ('articles', '0004_auto_20210908_0201'),
    ]
    operations = [
        migrations.AlterField(
            model_name='article',
            name='tags',
            field=models.ManyToManyField(through='articles.Relations', to='articles.Scope', verbose_name='Темы статьи'),
        ),
    ]
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,420 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/models.py | from django.db import models
from .settings_local import *
class Article(models.Model):
    """A published article with a title, body text and optional image."""
    title = models.CharField(max_length=256, verbose_name='Название')
    text = models.TextField(verbose_name='Текст')
    published_at = models.DateTimeField(verbose_name='Дата публикации')
    image = models.ImageField(null=True, blank=True, verbose_name='Изображение',)
    class Meta:
        verbose_name = 'Статья'
        verbose_name_plural = 'Статьи'
        ordering = ['-published_at']  # newest first
    def __str__(self):
        return self.title
class Scope(models.Model):
    """A topic/tag, linked to articles via the Relationship through-model."""
    name = models.CharField(max_length=25, verbose_name='Название')
    relation = models.ManyToManyField(Article, through='Relationship', verbose_name='Темы статьи')

    class Meta:
        verbose_name = 'Тема статьи'
        verbose_name_plural = 'Темы статьи'
        ordering = ['-name']

    def __str__(self):
        # Bug fix: the field was renamed tag -> name (migration 0010), but
        # __str__ still returned self.tag, raising AttributeError.
        return self.name
class Relationship(models.Model):
    """Through-model linking Article and Scope; is_main flags the primary
    scope (the admin formset enforces exactly one per article)."""
    # Article side of the link; reverse accessor: article.scopes
    articles = models.ForeignKey(Article, related_name='scopes', on_delete=models.CASCADE, verbose_name='Статья')
    tag = models.ForeignKey(Scope, on_delete=models.CASCADE, verbose_name='Раздел')
    is_main = models.BooleanField(default=False, verbose_name='Основной')
    class Meta:
        verbose_name = 'Тема статьи'
        verbose_name_plural = 'Темы статьи'
        ordering = ['-is_main']
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,421 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/migrations/0003_auto_20210908_0131.py | # Generated by Django 3.2.7 on 2021-09-07 22:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Scope meta ordering and Article.tags field. Do not hand-edit."""
    dependencies = [
        ('articles', '0002_auto_20210908_0116'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='scope',
            options={'ordering': ['-title'], 'verbose_name': 'Тема статьи', 'verbose_name_plural': 'Темы статьи'},
        ),
        migrations.AlterField(
            model_name='article',
            name='tags',
            field=models.ManyToManyField(related_name='scopes', through='articles.Relations', to='articles.Scope'),
        ),
    ]
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,422 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/admin.py | from django.contrib import admin
from django.core.exceptions import ValidationError
from django.forms import BaseInlineFormSet
from .models import Article, Relationship, Scope
import json
class RelationshipInlineFormset(BaseInlineFormSet):
    """
    Inline formset that enforces exactly one Relationship marked is_main.
    """

    def clean(self):
        super(RelationshipInlineFormset, self).clean()
        cnt = 0
        for form in self.forms:
            if not form.is_valid():
                # Field-level errors are reported by the form itself;
                # skip the cross-form check until they are fixed.
                return
            if form.cleaned_data and not form.cleaned_data.get('DELETE'):
                if form.cleaned_data['is_main']:
                    cnt += 1
                if cnt > 1:
                    # Typo fix in the user-facing message
                    # (was: 'У же ... выебирте').
                    raise ValidationError('Уже есть один основной раздел, выберите один!')
        if cnt < 1:
            raise ValidationError("Вам необходимо выбрать один основной раздел!")
        return super().clean()
class RelationshipInline(admin.TabularInline):
    """Inline editor for the Article<->Scope links in the admin."""
    model = Scope.relation.through
    extra = 1
    formset = RelationshipInlineFormset  # enforces exactly one is_main
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    """Admin for Article; scope links are edited via the inline only."""
    inlines = [RelationshipInline]
    # NOTE(review): Article has no 'relation' field in models.py —
    # presumably a leftover from an earlier schema; verify.
    exclude = ('relation',)
@admin.register(Scope)
class ScopeAdmin(admin.ModelAdmin):
inlines = [RelationshipInline] | {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,423 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/migrations/0010_auto_20210922_1400.py | # Generated by Django 3.1.2 on 2021-09-22 11:00
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename Relationship.scopes->tag and Scope.tag->name. Do not hand-edit."""
    dependencies = [
        ('articles', '0009_auto_20210922_1350'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='scope',
            options={'ordering': ['-name'], 'verbose_name': 'Тема статьи', 'verbose_name_plural': 'Темы статьи'},
        ),
        migrations.RenameField(
            model_name='relationship',
            old_name='scopes',
            new_name='tag',
        ),
        migrations.RenameField(
            model_name='scope',
            old_name='tag',
            new_name='name',
        ),
    ]
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,424 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/migrations/0004_auto_20210908_0201.py | # Generated by Django 3.2.7 on 2021-09-07 23:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: verbose names/ordering for Relations and its fields. Do not hand-edit."""
    dependencies = [
        ('articles', '0003_auto_20210908_0131'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='relations',
            options={'ordering': ['-is_main'], 'verbose_name': 'Тема статьи', 'verbose_name_plural': 'Темы статьи'},
        ),
        migrations.AlterField(
            model_name='article',
            name='tags',
            field=models.ManyToManyField(related_name='scopes', through='articles.Relations', to='articles.Scope', verbose_name='Темы статьи'),
        ),
        migrations.AlterField(
            model_name='relations',
            name='article',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.article', verbose_name='Статья'),
        ),
        migrations.AlterField(
            model_name='relations',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.scope', verbose_name='Раздел'),
        ),
        migrations.AlterField(
            model_name='relations',
            name='is_main',
            field=models.BooleanField(verbose_name='Основной'),
        ),
    ]
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,425 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/migrations/0008_auto_20210912_2310.py | # Generated by Django 3.1.2 on 2021-09-12 20:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: replace the Relations through-model with Relationship. Do not hand-edit."""
    dependencies = [
        ('articles', '0007_auto_20210912_2111'),
    ]
    operations = [
        migrations.CreateModel(
            name='Relationship',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_main', models.BooleanField(verbose_name='Основной')),
            ],
            options={
                'verbose_name': 'Тема статьи',
                'verbose_name_plural': 'Темы статьи',
                'ordering': ['-is_main'],
            },
        ),
        migrations.RemoveField(
            model_name='article',
            name='scopes',
        ),
        migrations.DeleteModel(
            name='Relations',
        ),
        migrations.AddField(
            model_name='relationship',
            name='articles',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.article', verbose_name='Статья'),
        ),
        migrations.AddField(
            model_name='relationship',
            name='scopes',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.scope', verbose_name='Раздел'),
        ),
        migrations.AddField(
            model_name='article',
            name='relation',
            field=models.ManyToManyField(through='articles.Relationship', to='articles.Scope', verbose_name='Темы статьи'),
        ),
    ]
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,426 | 97wave/dz_ORM.2_4 | refs/heads/master | /orm_migrations/school/migrations/0004_auto_20210912_1923.py | # Generated by Django 3.1.2 on 2021-09-12 16:23
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename Student.teacher -> Student.teachers. Do not hand-edit."""
    dependencies = [
        ('school', '0003_auto_20210912_1909'),
    ]
    operations = [
        migrations.RenameField(
            model_name='student',
            old_name='teacher',
            new_name='teachers',
        ),
    ]
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,427 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/migrations/0006_auto_20210912_2105.py | # Generated by Django 3.1.2 on 2021-09-12 18:05
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename Article.tags -> Article.scopes. Do not hand-edit."""
    dependencies = [
        ('articles', '0005_alter_article_tags'),
    ]
    operations = [
        migrations.RenameField(
            model_name='article',
            old_name='tags',
            new_name='scopes',
        ),
    ]
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,428 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/migrations/0011_auto_20210922_1405.py | # Generated by Django 3.1.2 on 2021-09-22 11:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add related_name to Relationship.articles; alter Scope.relation. Do not hand-edit."""
    dependencies = [
        ('articles', '0010_auto_20210922_1400'),
    ]
    operations = [
        migrations.AlterField(
            model_name='relationship',
            name='articles',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scopes', to='articles.article', verbose_name='Статья'),
        ),
        migrations.AlterField(
            model_name='scope',
            name='relation',
            field=models.ManyToManyField(through='articles.Relationship', to='articles.Article', verbose_name='Темы статьи'),
        ),
    ]
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,429 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/migrations/0007_auto_20210912_2111.py | # Generated by Django 3.1.2 on 2021-09-12 18:11
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename Scope.title -> Scope.tag; update ordering meta. Do not hand-edit."""
    dependencies = [
        ('articles', '0006_auto_20210912_2105'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='scope',
            options={'ordering': ['-tag'], 'verbose_name': 'Тема статьи', 'verbose_name_plural': 'Темы статьи'},
        ),
        migrations.RenameField(
            model_name='scope',
            old_name='title',
            new_name='tag',
        ),
    ]
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,430 | 97wave/dz_ORM.2_4 | refs/heads/master | /m2m-relations/articles/migrations/0009_auto_20210922_1350.py | # Generated by Django 3.1.2 on 2021-09-22 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: move the M2M relation from Article to Scope; default for is_main. Do not hand-edit."""
    dependencies = [
        ('articles', '0008_auto_20210912_2310'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='article',
            name='relation',
        ),
        migrations.AddField(
            model_name='scope',
            name='relation',
            field=models.ManyToManyField(related_name='scopes', through='articles.Relationship', to='articles.Article', verbose_name='Темы статьи'),
        ),
        migrations.AlterField(
            model_name='relationship',
            name='is_main',
            field=models.BooleanField(default=False, verbose_name='Основной'),
        ),
    ]
| {"/m2m-relations/articles/admin.py": ["/m2m-relations/articles/models.py"]} |
67,436 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/dataset/dataset.py | import copy
import glob
import typing
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
import scipy.ndimage
import chainer
import librosa
import numpy
import pysptk
import pyworld
from ..config.config import DatasetConfig
from ..config.sr_config import SRDatasetConfig
from ..data_struct import AcousticFeature
from ..data_struct import LowHighSpectrogramFeature
from ..data_struct import Wave
class BaseDataProcess(metaclass=ABCMeta):
    """Abstract interface for a single dataset-processing step."""
    @abstractmethod
    def __call__(self, data, test):
        """Transform *data*; *test* flags evaluation-time behavior."""
        pass
class LambdaProcess(BaseDataProcess):
    """Wrap a plain callable (data, test) -> data as a data process."""
    def __init__(self, process: Callable[[Any, bool], Any]) -> None:
        self._process = process
    def __call__(self, data, test):
        return self._process(data, test)
class DictKeyReplaceProcess(BaseDataProcess):
    """
    Rename the keys of a dict.

    *key_map* maps new key -> old key; the output is
    {new_key: data[old_key]} for every entry of key_map.
    """

    def __init__(self, key_map: Dict[str, str]) -> None:
        self._key_map = key_map

    def __call__(self, data: Dict[str, Any], test):
        # Bug fix: iterate the mapping's items, not the mapping itself.
        # Iterating a dict yields only its keys, so the original
        # "for key_after, key_before in self._key_map" tried to unpack
        # each key string and could not work as intended.
        return {key_after: data[key_before]
                for key_after, key_before in self._key_map.items()}
class ChainProcess(BaseDataProcess):
    """Apply a sequence of data processes one after another."""

    def __init__(self, process: typing.Iterable[BaseDataProcess]) -> None:
        # Materialize once so the chain can be iterated repeatedly/appended to.
        self._process = list(process)

    def __call__(self, data, test):
        result = data
        for step in self._process:
            result = step(result, test)
        return result

    def append(self, process: BaseDataProcess):
        self._process.append(process)
class SplitProcess(BaseDataProcess):
    """
    Fan the input out into a dict: each named sub-process receives the same
    input; a None entry passes the input through unchanged.
    """

    def __init__(self, process: typing.Dict[str, typing.Optional[BaseDataProcess]]) -> None:
        self._process = process

    def __call__(self, data, test):
        result = {}
        for name, sub_process in self._process.items():
            if sub_process is None:
                result[name] = data
            else:
                result[name] = sub_process(data, test)
        return result
class WaveFileLoadProcess(BaseDataProcess):
    """
    Load a wave file from a path, optionally trimming silence and padding
    both ends, and wrap the samples in a Wave object.
    """

    def __init__(self, sample_rate: int, top_db: float = None, pad_second: float = 0, dtype=numpy.float32) -> None:
        self._sample_rate = sample_rate
        self._top_db = top_db
        self._pad_second = pad_second
        self._dtype = dtype

    def __call__(self, data: str, test=None):
        # Resample on load; librosa returns (samples, sr) — keep samples only.
        samples = librosa.core.load(data, sr=self._sample_rate, dtype=self._dtype)[0]
        # Optionally strip intervals quieter than top_db.
        if self._top_db is not None:
            intervals = librosa.effects.split(samples, top_db=self._top_db)
            samples = librosa.effects.remix(samples, intervals=intervals)
        # Optionally pad both ends with pad_second seconds of silence.
        if self._pad_second > 0.0:
            num_pad = int(self._sample_rate * self._pad_second)
            samples = numpy.pad(samples, pad_width=(num_pad, num_pad), mode='constant')
        return Wave(samples, self._sample_rate)
class AcousticFeatureProcess(BaseDataProcess):
    """
    Extract WORLD/SPTK acoustic features (F0, spectrogram, aperiodicity,
    mel-cepstrum, voiced flags) from a Wave object.
    """
    def __init__(
            self,
            frame_period,
            order,
            alpha,
            f0_estimating_method,
            f0_floor=71,
            f0_ceil=800,
            dtype=numpy.float32,
    ) -> None:
        self._frame_period = frame_period
        self._order = order
        self._alpha = alpha
        # 'dio' selects pyworld.dio; anything else selects world4py harvest
        self._f0_estimating_method = f0_estimating_method
        self._f0_floor = f0_floor
        self._f0_ceil = f0_ceil
        self._dtype = dtype
    def __call__(self, data: Wave, test=None):
        # WORLD expects float64 input
        x = data.wave.astype(numpy.float64)
        fs = data.sampling_rate
        if self._f0_estimating_method == 'dio':
            _f0, t = pyworld.dio(
                x,
                fs,
                frame_period=self._frame_period,
                f0_floor=self._f0_floor,
                f0_ceil=self._f0_ceil,
            )
        else:
            # Optional dependency: only imported when harvest is requested
            from world4py.np import apis
            _f0, t = apis.harvest(
                x,
                fs,
                frame_period=self._frame_period,
                f0_floor=self._f0_floor,
                f0_ceil=self._f0_ceil,
            )
        # Refine the raw F0 estimate
        f0 = pyworld.stonemask(x, _f0, t, fs)
        spectrogram = pyworld.cheaptrick(x, f0, t, fs)
        aperiodicity = pyworld.d4c(x, f0, t, fs)
        # Mel-cepstrum from the spectral envelope
        mfcc = pysptk.sp2mc(spectrogram, order=self._order, alpha=self._alpha)
        # Frames with f0 == 0 are unvoiced
        voiced = ~(f0 == 0) # type: numpy.ndarray
        feature = AcousticFeature(
            f0=f0[:, None].astype(self._dtype),
            spectrogram=spectrogram.astype(self._dtype),
            aperiodicity=aperiodicity.astype(self._dtype),
            mfcc=mfcc.astype(self._dtype),
            voiced=voiced[:, None],
        )
        feature.validate()
        return feature
class LowHighSpectrogramFeatureProcess(BaseDataProcess):
    """Build the low/high spectrogram pair used for super-resolution training."""

    def __init__(self, frame_period, order, alpha, f0_estimating_method, dtype=numpy.float32) -> None:
        self._acoustic_feature_process = AcousticFeatureProcess(
            frame_period=frame_period,
            order=order,
            alpha=alpha,
            f0_estimating_method=f0_estimating_method,
        )
        self._dtype = dtype
        self._alpha = alpha

    def __call__(self, data: Wave, test):
        # NOTE(review): the inner process is always invoked with test=True,
        # ignoring this method's *test* argument — confirm this is intended.
        acoustic_feature = self._acoustic_feature_process(data, test=True).astype_only_float(self._dtype)
        # 'high': the full WORLD spectral envelope
        high_spectrogram = acoustic_feature.spectrogram

        # 'low': the envelope reconstructed from the truncated mel-cepstrum,
        # i.e. what survives the lossy mel-cepstrum representation
        fftlen = pyworld.get_cheaptrick_fft_size(data.sampling_rate)
        low_spectrogram = pysptk.mc2sp(
            acoustic_feature.mfcc,
            alpha=self._alpha,
            fftlen=fftlen,
        )

        feature = LowHighSpectrogramFeature(
            low=low_spectrogram,
            high=high_spectrogram,
        )
        feature.validate()
        return feature
class AcousticFeatureLoadProcess(BaseDataProcess):
    """Load an AcousticFeature previously saved as a pickled-dict .npy file."""

    def __init__(self, validate=False) -> None:
        # validate: when True, run AcousticFeature.validate() after loading
        self._validate = validate

    def __call__(self, path: Path, test=None):
        # The .npy file holds a 0-d object array wrapping a dict (see
        # AcousticFeatureSaveProcess). allow_pickle=True is required to read
        # it on NumPy >= 1.16.3, where the default flipped to False.
        d = numpy.load(path.expanduser(), allow_pickle=True).item()  # type: dict
        feature = AcousticFeature(
            f0=d['f0'],
            spectrogram=d['spectrogram'],
            aperiodicity=d['aperiodicity'],
            mfcc=d['mfcc'],
            voiced=d['voiced'],
        )
        if self._validate:
            feature.validate()
        return feature
class LowHighSpectrogramFeatureLoadProcess(BaseDataProcess):
    """Load a LowHighSpectrogramFeature previously saved as a pickled-dict .npy file."""

    def __init__(self, validate=False) -> None:
        # validate: when True, run LowHighSpectrogramFeature.validate() after loading
        self._validate = validate

    def __call__(self, path: Path, test=None):
        # allow_pickle=True is required to read the 0-d object array wrapping
        # a dict on NumPy >= 1.16.3, where the default flipped to False.
        d = numpy.load(path.expanduser(), allow_pickle=True).item()  # type: dict
        feature = LowHighSpectrogramFeature(
            low=d['low'],
            high=d['high'],
        )
        if self._validate:
            feature.validate()
        return feature
class AcousticFeatureSaveProcess(BaseDataProcess):
    """Save an AcousticFeature to a .npy file as a pickled dict.

    Fields listed in *ignore* are replaced by numpy.nan before saving.
    ``data`` must be a dict holding 'path' (a Path) and 'feature'.
    """

    def __init__(self, validate=False, ignore: List[str] = None) -> None:
        self._validate = validate
        self._ignore = ignore if ignore is not None else []

    def __call__(self, data: Dict[str, Any], test=None):
        path = data['path']  # type: Path
        feature = data['feature']  # type: AcousticFeature
        if self._validate:
            feature.validate()

        out = {
            'f0': feature.f0,
            'spectrogram': feature.spectrogram,
            'aperiodicity': feature.aperiodicity,
            'mfcc': feature.mfcc,
            'voiced': feature.voiced,
        }
        for key in self._ignore:
            assert key in out
            out[key] = numpy.nan

        numpy.save(path.absolute(), out)
class DistillateUsingFeatureProcess(BaseDataProcess):
    """Keep only the *targets* fields of a feature; the rest become numpy.nan."""

    def __init__(self, targets: List[str]) -> None:
        self._targets = targets

    def __call__(self, feature: AcousticFeature, test=None):
        kept = defaultdict(lambda: numpy.nan)
        for name in self._targets:
            kept[name] = getattr(feature, name)
        return AcousticFeature(
            f0=kept['f0'],
            spectrogram=kept['spectrogram'],
            aperiodicity=kept['aperiodicity'],
            mfcc=kept['mfcc'],
            voiced=kept['voiced'],
        )
class MakeMaskProcess(BaseDataProcess):
    """Build a mask feature: ones everywhere, except f0 which is masked by voiced."""

    def __init__(self) -> None:
        pass

    def __call__(self, feature: AcousticFeature, test=None):
        # The builtin ``bool`` replaces ``numpy.bool``, an alias of the
        # builtin that was deprecated in NumPy 1.20 and removed in 1.24.
        return AcousticFeature(
            f0=feature.voiced,
            spectrogram=numpy.ones_like(feature.spectrogram, dtype=bool),
            aperiodicity=numpy.ones_like(feature.aperiodicity, dtype=bool),
            mfcc=numpy.ones_like(feature.mfcc, dtype=bool),
            voiced=numpy.ones_like(feature.voiced, dtype=bool),
        ).astype(numpy.float32)
class AcousticFeatureNormalizeProcess(BaseDataProcess):
    """Standardize a feature with precomputed per-field mean and variance."""

    def __init__(self, mean: AcousticFeature, var: AcousticFeature) -> None:
        self._mean = mean
        self._var = var

    def _standardize(self, value, mean, var):
        # (x - mean) / std
        return (value - mean) / numpy.sqrt(var)

    def __call__(self, data: AcousticFeature, test=None):
        f0 = self._standardize(data.f0, self._mean.f0, self._var.f0)
        f0[~data.voiced] = 0  # unvoiced frames carry no meaningful f0
        return AcousticFeature(
            f0=f0,
            spectrogram=self._standardize(data.spectrogram, self._mean.spectrogram, self._var.spectrogram),
            aperiodicity=self._standardize(data.aperiodicity, self._mean.aperiodicity, self._var.aperiodicity),
            mfcc=self._standardize(data.mfcc, self._mean.mfcc, self._var.mfcc),
            voiced=data.voiced,
        )
class AcousticFeatureDenormalizeProcess(BaseDataProcess):
    """Invert AcousticFeatureNormalizeProcess using the same mean and variance."""

    def __init__(self, mean: AcousticFeature, var: AcousticFeature) -> None:
        self._mean = mean
        self._var = var

    def _destandardize(self, value, mean, var):
        # x * std + mean
        return value * numpy.sqrt(var) + mean

    def __call__(self, data: AcousticFeature, test=None):
        f0 = self._destandardize(data.f0, self._mean.f0, self._var.f0)
        f0[~data.voiced] = 0  # unvoiced frames carry no meaningful f0
        return AcousticFeature(
            f0=f0,
            spectrogram=self._destandardize(data.spectrogram, self._mean.spectrogram, self._var.spectrogram),
            aperiodicity=self._destandardize(data.aperiodicity, self._mean.aperiodicity, self._var.aperiodicity),
            mfcc=self._destandardize(data.mfcc, self._mean.mfcc, self._var.mfcc),
            voiced=data.voiced,
        )
class EncodeFeatureProcess(BaseDataProcess):
    """Stack the chosen feature fields into one (channel, time) array."""

    def __init__(self, targets: List[str]) -> None:
        self._targets = targets

    def __call__(self, data: AcousticFeature, test):
        columns = [getattr(data, name) for name in self._targets]
        # concatenate along the feature axis, then transpose to channel-major
        return numpy.concatenate(columns, axis=1).T
class DecodeFeatureProcess(BaseDataProcess):
    """Split a (channel, time) array back into an AcousticFeature.

    *sizes* gives the channel width of each target field; fields not in
    *targets* come back as numpy.nan.
    """

    def __init__(self, targets: List[str], sizes: Dict[str, int]) -> None:
        assert all(t in sizes for t in targets)
        self._targets = targets
        self._sizes = sizes

    def __call__(self, data: numpy.ndarray, test):
        data = data.T
        widths = [self._sizes[name] for name in self._targets]
        lasts = numpy.cumsum(widths).tolist()
        assert data.shape[1] == lasts[-1]

        pieces = defaultdict(lambda: numpy.nan)
        start = 0
        for name, width in zip(self._targets, widths):
            pieces[name] = data[:, start:start + width]
            start += width

        return AcousticFeature(
            f0=pieces['f0'],
            spectrogram=pieces['spectrogram'],
            aperiodicity=pieces['aperiodicity'],
            mfcc=pieces['mfcc'],
            voiced=pieces['voiced'],
        )
class ShapeAlignProcess(BaseDataProcess):
    """Zero-pad 'input', 'target' and 'mask' along axis 1 to a common length."""

    def __call__(self, data, test):
        keys = ('input', 'target', 'mask')
        longest = max(data[k].shape[1] for k in keys)
        for k in keys:
            short = data[k]
            data[k] = numpy.pad(short, ((0, 0), (0, longest - short.shape[1])), mode='constant')
        return data
class RandomPaddingProcess(BaseDataProcess):
    """Randomly zero-pad along the time axis (training only, seed-reproducible).

    ``datas`` must carry 'data' and a 'seed' so parallel fields can use
    the same padding offsets.
    NOTE(review): pre + post always sums to min_size, so non-empty data
    ends up len + min_size long — presumably fine because a crop of
    min_size follows in the pipeline; confirm.
    """

    def __init__(self, min_size: int, time_axis: int = 1) -> None:
        self._min_size = min_size
        self._time_axis = time_axis

    def __call__(self, datas: Dict[str, Any], test=True):
        assert not test
        array, seed = datas['data'], datas['seed']
        rng = numpy.random.RandomState(seed)

        if array.shape[self._time_axis] >= self._min_size:
            return array

        pre = rng.randint(self._min_size - array.shape[self._time_axis] + 1)
        post = self._min_size - pre
        pad_widths = [(0, 0)] * array.ndim
        pad_widths[self._time_axis] = (pre, post)
        return numpy.pad(array, pad_widths, mode='constant')
class LastPaddingProcess(BaseDataProcess):
    """Deterministically zero-pad at the front of the time axis up to min_size.

    Generalized to honor *time_axis* for any rank: the original asserted
    time_axis == 1 and hard-coded padding of axis 1, which made it unusable
    with the time_axis=0 layout used by the super-resolution pipeline.
    Behavior for the previous only-supported case (2-D, time_axis=1) is
    unchanged.
    """

    def __init__(self, min_size: int, time_axis: int = 1) -> None:
        self._min_size = min_size
        self._time_axis = time_axis

    def __call__(self, data: numpy.ndarray, test=None):
        if data.shape[self._time_axis] >= self._min_size:
            return data
        pre = self._min_size - data.shape[self._time_axis]
        pad_widths = [(0, 0)] * data.ndim
        pad_widths[self._time_axis] = (pre, 0)
        return numpy.pad(data, pad_widths, mode='constant')
class RandomCropProcess(BaseDataProcess):
    """Crop a crop_size window at a random offset along the time axis (training only)."""

    def __init__(self, crop_size: int, time_axis: int = 1) -> None:
        self._crop_size = crop_size
        self._time_axis = time_axis

    def __call__(self, datas: Dict[str, Any], test=True):
        assert not test
        array, seed = datas['data'], datas['seed']
        rng = numpy.random.RandomState(seed)

        len_time = array.shape[self._time_axis]
        assert len_time >= self._crop_size
        start = rng.randint(len_time - self._crop_size + 1)

        window = [slice(None)] * array.ndim
        window[self._time_axis] = slice(start, start + self._crop_size)
        return array[tuple(window)]
class FirstCropProcess(BaseDataProcess):
    """Take the first crop_size frames along the time axis."""

    def __init__(self, crop_size: int, time_axis: int = 1) -> None:
        self._crop_size = crop_size
        self._time_axis = time_axis

    def __call__(self, data: numpy.ndarray, test=None):
        window = [slice(None)] * data.ndim
        window[self._time_axis] = slice(0, self._crop_size)
        return data[tuple(window)]
class AddNoiseProcess(BaseDataProcess):
    """Add Gaussian noise during training: one global offset plus elementwise noise.

    p_global / p_local scale the noise; ``None`` now disables the
    corresponding term (the original accepted None in __init__ but then
    crashed on ``randn() * None`` in __call__).
    """

    def __init__(self, p_global: float = None, p_local: float = None) -> None:
        assert p_global is None or 0 <= p_global
        assert p_local is None or 0 <= p_local
        self._p_global = p_global
        self._p_local = p_local

    def __call__(self, data: numpy.ndarray, test):
        assert not test
        noisy = data
        if self._p_global is not None:
            # a single random offset shared by the whole array
            noisy = noisy + numpy.random.randn() * self._p_global
        if self._p_local is not None:
            # independent per-element noise, cast to the input dtype
            noisy = noisy + numpy.random.randn(*data.shape).astype(data.dtype) * self._p_local
        return noisy
class RandomBlurProcess(BaseDataProcess):
    """Gaussian-blur along the time axis with a random blur width (training only)."""

    def __init__(self, blur_size_factor: float, time_axis: int = 1) -> None:
        assert time_axis == 1
        self._blur_size_factor = blur_size_factor
        self._time_axis = time_axis

    def __call__(self, data: numpy.ndarray, test=None):
        assert not test
        # half-normal random width scaled by the configured factor
        width = numpy.abs(numpy.random.randn()) * self._blur_size_factor
        return scipy.ndimage.gaussian_filter(data, (0, width))
class DataProcessDataset(chainer.dataset.DatasetMixin):
    """Chainer dataset adapter that applies *data_process* lazily to each raw item."""

    def __init__(self, data: typing.List, data_process: BaseDataProcess) -> None:
        self._data = data
        self._data_process = data_process

    def __len__(self):
        return len(self._data)

    def get_example(self, i):
        # test mode mirrors chainer's global train flag at access time
        return self._data_process(data=self._data[i], test=not chainer.config.train)
def create(config: DatasetConfig):
    """Build the train/test/train_eval datasets of paired acoustic features.

    Each example starts as {input_path, target_path} and ends as a dict of
    'input', 'target' and 'mask' arrays in (channel, time) layout.
    Returns a dict of DataProcessDataset keyed 'train', 'test', 'train_eval'.
    """
    acoustic_feature_load_process = AcousticFeatureLoadProcess()
    # precomputed per-field statistics used to standardize input and target
    input_mean = acoustic_feature_load_process(config.input_mean_path, test=True)
    input_var = acoustic_feature_load_process(config.input_var_path, test=True)
    target_mean = acoustic_feature_load_process(config.target_mean_path, test=True)
    target_var = acoustic_feature_load_process(config.target_var_path, test=True)

    # {input_path, target_path}
    data_process_base = ChainProcess([
        SplitProcess(dict(
            input=ChainProcess([
                LambdaProcess(lambda d, test: d['input_path']),
                acoustic_feature_load_process,
                DistillateUsingFeatureProcess(config.features + ['voiced']),
                AcousticFeatureNormalizeProcess(mean=input_mean, var=input_var),
                EncodeFeatureProcess(config.features),
            ]),
            target=ChainProcess([
                LambdaProcess(lambda d, test: d['target_path']),
                acoustic_feature_load_process,
                DistillateUsingFeatureProcess(config.features + ['voiced']),
                AcousticFeatureNormalizeProcess(mean=target_mean, var=target_var),
                SplitProcess(dict(
                    feature=EncodeFeatureProcess(config.features),
                    # mask marks which target elements carry usable signal
                    mask=ChainProcess([
                        MakeMaskProcess(),
                        EncodeFeatureProcess(config.features),
                    ])
                )),
            ]),
        )),
        LambdaProcess(
            lambda d, test: dict(input=d['input'], target=d['target']['feature'], mask=d['target']['mask'])),
        ShapeAlignProcess(),
    ])

    data_process_train = copy.deepcopy(data_process_base)

    # cropping
    if config.train_crop_size is not None:
        def add_seed():
            # one shared seed so input/target/mask get identical pad/crop offsets
            return LambdaProcess(lambda d, test: dict(seed=numpy.random.randint(2 ** 32), **d))

        def padding(s):
            return ChainProcess([
                LambdaProcess(lambda d, test: dict(data=d[s], seed=d['seed'])),
                RandomPaddingProcess(min_size=config.train_crop_size),
            ])

        def crop(s):
            return ChainProcess([
                LambdaProcess(lambda d, test: dict(data=d[s], seed=d['seed'])),
                RandomCropProcess(crop_size=config.train_crop_size),
            ])

        data_process_train.append(ChainProcess([
            add_seed(),
            SplitProcess(dict(input=padding('input'), target=padding('target'), mask=padding('mask'))),
            add_seed(),
            SplitProcess(dict(input=crop('input'), target=crop('target'), mask=crop('mask'))),
        ]))

    # add noise
    data_process_train.append(SplitProcess(dict(
        input=ChainProcess([
            LambdaProcess(lambda d, test: d['input']),
            AddNoiseProcess(p_global=config.input_global_noise, p_local=config.input_local_noise),
        ]),
        target=ChainProcess([
            LambdaProcess(lambda d, test: d['target']),
            AddNoiseProcess(p_global=config.target_global_noise, p_local=config.target_local_noise),
        ]),
        mask=ChainProcess([
            LambdaProcess(lambda d, test: d['mask']),
        ]),
    )))

    # deterministic variant for evaluation: pad at the front, crop the head
    data_process_test = copy.deepcopy(data_process_base)
    if config.train_crop_size is not None:
        data_process_test.append(SplitProcess(dict(
            input=ChainProcess([
                LambdaProcess(lambda d, test: d['input']),
                LastPaddingProcess(min_size=config.train_crop_size),
                FirstCropProcess(crop_size=config.train_crop_size),
            ]),
            target=ChainProcess([
                LambdaProcess(lambda d, test: d['target']),
                LastPaddingProcess(min_size=config.train_crop_size),
                FirstCropProcess(crop_size=config.train_crop_size),
            ]),
            mask=ChainProcess([
                LambdaProcess(lambda d, test: d['mask']),
                LastPaddingProcess(min_size=config.train_crop_size),
                FirstCropProcess(crop_size=config.train_crop_size),
            ]),
        )))

    input_paths = list(sorted([Path(p) for p in glob.glob(str(config.input_glob))]))
    target_paths = list(sorted([Path(p) for p in glob.glob(str(config.target_glob))]))
    assert len(input_paths) == len(target_paths)

    num_test = config.num_test
    pairs = [
        dict(input_path=input_path, target_path=target_path)
        for input_path, target_path in zip(input_paths, target_paths)
    ]
    # shuffle with a fixed seed so the train/test split is stable across runs
    numpy.random.RandomState(config.seed).shuffle(pairs)
    train_paths = pairs[num_test:]
    test_paths = pairs[:num_test]
    train_for_evaluate_paths = train_paths[:num_test]

    return {
        'train': DataProcessDataset(train_paths, data_process_train),
        'test': DataProcessDataset(test_paths, data_process_test),
        'train_eval': DataProcessDataset(train_for_evaluate_paths, data_process_test),
    }
def create_sr(config: SRDatasetConfig):
    """Build train/test/train_eval datasets for spectrogram super-resolution.

    Each example comes from one saved LowHighSpectrogramFeature file and
    becomes log-spectrogram 'input' (low) / 'target' (high) arrays with a
    leading channel axis added at the end.
    """
    data_process_base = ChainProcess([
        LowHighSpectrogramFeatureLoadProcess(validate=True),
        SplitProcess(dict(
            # drop the last frequency bin and work in log amplitude
            input=LambdaProcess(lambda d, test: numpy.log(d.low[:, :-1])),
            target=LambdaProcess(lambda d, test: numpy.log(d.high[:, :-1])),
        )),
    ])

    data_process_train = copy.deepcopy(data_process_base)

    # blur
    data_process_train.append(SplitProcess(dict(
        input=ChainProcess([
            LambdaProcess(lambda d, test: d['input']),
            RandomBlurProcess(blur_size_factor=config.blur_size_factor),
        ]),
        target=ChainProcess([
            LambdaProcess(lambda d, test: d['target']),
        ]),
    )))

    # cropping
    if config.train_crop_size is not None:
        def add_seed():
            # one shared seed so input and target get identical pad/crop offsets
            return LambdaProcess(lambda d, test: dict(seed=numpy.random.randint(2 ** 32), **d))

        def padding(s):
            return ChainProcess([
                LambdaProcess(lambda d, test: dict(data=d[s], seed=d['seed'])),
                RandomPaddingProcess(min_size=config.train_crop_size, time_axis=0),
            ])

        def crop(s):
            return ChainProcess([
                LambdaProcess(lambda d, test: dict(data=d[s], seed=d['seed'])),
                RandomCropProcess(crop_size=config.train_crop_size, time_axis=0),
            ])

        data_process_train.append(ChainProcess([
            add_seed(),
            SplitProcess(dict(input=padding('input'), target=padding('target'))),
            add_seed(),
            SplitProcess(dict(input=crop('input'), target=crop('target'))),
        ]))

    # add noise
    data_process_train.append(SplitProcess(dict(
        input=ChainProcess([
            LambdaProcess(lambda d, test: d['input']),
            AddNoiseProcess(p_global=config.input_global_noise, p_local=config.input_local_noise),
        ]),
        target=ChainProcess([
            LambdaProcess(lambda d, test: d['target']),
        ]),
    )))
    # add a leading channel axis
    data_process_train.append(LambdaProcess(lambda d, test: {
        'input': d['input'][numpy.newaxis],
        'target': d['target'][numpy.newaxis],
    }))

    data_process_test = copy.deepcopy(data_process_base)
    if config.train_crop_size is not None:
        data_process_test.append(SplitProcess(dict(
            input=ChainProcess([
                LambdaProcess(lambda d, test: d['input']),
                # NOTE(review): data here is time-major (time_axis=0), but
                # LastPaddingProcess is left at its default time_axis=1 while
                # FirstCropProcess uses time_axis=0 — looks inconsistent; confirm.
                LastPaddingProcess(min_size=config.train_crop_size),
                FirstCropProcess(crop_size=config.train_crop_size, time_axis=0),
            ]),
            target=ChainProcess([
                LambdaProcess(lambda d, test: d['target']),
                LastPaddingProcess(min_size=config.train_crop_size),
                FirstCropProcess(crop_size=config.train_crop_size, time_axis=0),
            ]),
        )))
    # add a leading channel axis
    data_process_test.append(LambdaProcess(lambda d, test: {
        'input': d['input'][numpy.newaxis],
        'target': d['target'][numpy.newaxis],
    }))

    input_paths = list(sorted([Path(p) for p in glob.glob(str(config.input_glob))]))

    num_test = config.num_test
    # shuffle with a fixed seed so the train/test split is stable across runs
    numpy.random.RandomState(config.seed).shuffle(input_paths)
    train_paths = input_paths[num_test:]
    test_paths = input_paths[:num_test]
    train_for_evaluate_paths = train_paths[:num_test]

    return {
        'train': DataProcessDataset(train_paths, data_process_train),
        'test': DataProcessDataset(test_paths, data_process_test),
        'train_eval': DataProcessDataset(train_for_evaluate_paths, data_process_test),
    }
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,437 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/data_struct.py | from typing import NamedTuple, Dict, List
import numpy
import pyworld
# Floor value for the 0th mel-cepstrum coefficient, used to synthesize
# silent frames (see AcousticFeature.silent).
_min_mc = -18.3


class Wave(NamedTuple):
    """A mono waveform together with its sampling rate."""
    # wave: 1-D array of samples; sampling_rate in Hz
    wave: numpy.ndarray
    sampling_rate: int
class AcousticFeature(NamedTuple):
    """WORLD-style acoustic features; fields not in use default to numpy.nan.

    All populated fields are 2-D (time, channel) arrays sharing the same
    number of frames.
    """
    f0: numpy.ndarray = numpy.nan
    spectrogram: numpy.ndarray = numpy.nan
    aperiodicity: numpy.ndarray = numpy.nan
    mfcc: numpy.ndarray = numpy.nan
    voiced: numpy.ndarray = numpy.nan

    @staticmethod
    def dtypes():
        """Canonical dtype of each field.

        The builtin ``bool`` replaces ``numpy.bool``, an alias of the builtin
        deprecated in NumPy 1.20 and removed in 1.24.
        """
        return dict(
            f0=numpy.float32,
            spectrogram=numpy.float32,
            aperiodicity=numpy.float32,
            mfcc=numpy.float32,
            voiced=bool,
        )

    def astype(self, dtype):
        """Cast every field (including voiced) to *dtype*."""
        return AcousticFeature(
            f0=self.f0.astype(dtype),
            spectrogram=self.spectrogram.astype(dtype),
            aperiodicity=self.aperiodicity.astype(dtype),
            mfcc=self.mfcc.astype(dtype),
            voiced=self.voiced.astype(dtype),
        )

    def astype_only_float(self, dtype):
        """Cast the float fields to *dtype*; keep the voiced mask untouched."""
        return AcousticFeature(
            f0=self.f0.astype(dtype),
            spectrogram=self.spectrogram.astype(dtype),
            aperiodicity=self.aperiodicity.astype(dtype),
            mfcc=self.mfcc.astype(dtype),
            voiced=self.voiced,
        )

    def validate(self):
        """Assert every field is 2-D, time-aligned with f0, and voiced is boolean."""
        assert self.f0.ndim == 2
        assert self.spectrogram.ndim == 2
        assert self.aperiodicity.ndim == 2
        assert self.mfcc.ndim == 2
        assert self.voiced.ndim == 2

        len_time = len(self.f0)
        assert len(self.spectrogram) == len_time
        assert len(self.aperiodicity) == len_time
        assert len(self.mfcc) == len_time
        assert len(self.voiced) == len_time

        # builtin bool, not the removed numpy.bool alias
        assert self.voiced.dtype == bool

    @staticmethod
    def silent(length: int, sizes: Dict[str, int], keys: List[str]):
        """Build a silent feature of *length* frames containing only *keys*."""
        d = {}
        if 'f0' in keys:
            d['f0'] = numpy.zeros((length, sizes['f0']), dtype=AcousticFeature.dtypes()['f0'])
        if 'spectrogram' in keys:
            d['spectrogram'] = numpy.zeros((length, sizes['spectrogram']),
                                           dtype=AcousticFeature.dtypes()['spectrogram'])
        if 'aperiodicity' in keys:
            d['aperiodicity'] = numpy.zeros((length, sizes['aperiodicity']),
                                            dtype=AcousticFeature.dtypes()['aperiodicity'])
        if 'mfcc' in keys:
            # silence: floor value in the 0th cepstral coefficient, zeros elsewhere
            d['mfcc'] = numpy.hstack((
                numpy.ones((length, 1), dtype=AcousticFeature.dtypes()['mfcc']) * _min_mc,
                numpy.zeros((length, sizes['mfcc'] - 1), dtype=AcousticFeature.dtypes()['mfcc'])
            ))
        if 'voiced' in keys:
            d['voiced'] = numpy.zeros((length, sizes['voiced']), dtype=AcousticFeature.dtypes()['voiced'])
        feature = AcousticFeature(**d)
        return feature

    @staticmethod
    def concatenate(fs: List['AcousticFeature'], keys: List[str]):
        """Concatenate *keys* of the given features along time; nan fields stay nan."""
        is_target = lambda a: not numpy.any(numpy.isnan(a))
        return AcousticFeature(**{
            key: numpy.concatenate([getattr(f, key) for f in fs]) if is_target(getattr(fs[0], key)) else numpy.nan
            for key in keys
        })

    def pick(self, first: int, last: int):
        """Slice frames [first, last) out of every populated field."""
        is_target = lambda a: not numpy.any(numpy.isnan(a))
        return AcousticFeature(
            f0=self.f0[first:last] if is_target(self.f0) else numpy.nan,
            spectrogram=self.spectrogram[first:last] if is_target(self.spectrogram) else numpy.nan,
            aperiodicity=self.aperiodicity[first:last] if is_target(self.aperiodicity) else numpy.nan,
            mfcc=self.mfcc[first:last] if is_target(self.mfcc) else numpy.nan,
            voiced=self.voiced[first:last] if is_target(self.voiced) else numpy.nan,
        )

    @staticmethod
    def get_sizes(sampling_rate: int, order: int):
        """Per-field channel widths for the given sampling rate and mel-cepstrum order."""
        fft_size = pyworld.get_cheaptrick_fft_size(fs=sampling_rate)
        return dict(
            f0=1,
            spectrogram=fft_size // 2 + 1,
            aperiodicity=fft_size // 2 + 1,
            mfcc=order + 1,
            voiced=1,
        )
class LowHighSpectrogramFeature(NamedTuple):
    """A pair of time-aligned spectrograms: 'low' (lossy) and 'high' (full) quality."""
    low: numpy.ndarray
    high: numpy.ndarray

    def validate(self):
        """Check that both spectrograms are 2-D and identically shaped."""
        for spectrogram in (self.low, self.high):
            assert spectrogram.ndim == 2
        assert self.low.shape == self.high.shape
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,438 | HamaguchiKazuki/become-yukarin | refs/heads/master | /scripts/ln_atr503_to_subset.py | import argparse
from pathlib import Path
# Symlink 503 files (presumably the ATR 503 sentence set — the count is
# asserted below) into a flat directory with canonical names A01..J50 + J51..J53.
parser = argparse.ArgumentParser()
parser.add_argument('input', type=Path)
parser.add_argument('output', type=Path)
parser.add_argument('--prefix', default='')
argument = parser.parse_args()

input = argument.input  # type: Path
output = argument.output  # type: Path

# sort numerically by the digits embedded in each file name
paths = list(sorted(input.glob('*'), key=lambda p: int(''.join(filter(str.isdigit, p.name)))))
assert len(paths) == 503

output.mkdir(exist_ok=True)

# 10 groups (A..J) of 50 names each, plus the three extras J51-J53
names = ['{}{:02d}'.format(s, n + 1) for s in 'ABCDEFGHIJ' for n in range(50)]
names += ['J51', 'J52', 'J53']

for p, n in zip(paths, names):
    out = output / (argument.prefix + n + p.suffix)
    out.symlink_to(p)
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,439 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/voice_changer.py | import numpy
from .acoustic_converter import AcousticConverter
from .data_struct import AcousticFeature
from .super_resolution import SuperResolution
class VoiceChanger(object):
    """Two-stage voice conversion: acoustic conversion, then spectrogram super-resolution."""

    def __init__(
            self,
            acoustic_converter: AcousticConverter,
            super_resolution: SuperResolution,
            output_sampling_rate: int = None,
    ) -> None:
        self.acoustic_converter = acoustic_converter
        self.super_resolution = super_resolution
        # default the output rate to the one the SR model's dataset was built with
        if output_sampling_rate is None:
            output_sampling_rate = super_resolution.config.dataset.param.voice_param.sample_rate
        self.output_sampling_rate = output_sampling_rate

    def convert_from_acoustic_feature(self, f_in: AcousticFeature):
        """Convert an input feature into the high-resolution target feature."""
        converted = self.acoustic_converter.convert_to_feature(f_in)
        upscaled_spectrogram = self.super_resolution.convert(converted.spectrogram.astype(numpy.float32))
        return self.super_resolution.convert_to_feature(upscaled_spectrogram, converted)
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,440 | HamaguchiKazuki/become-yukarin | refs/heads/master | /train.py | import argparse
from functools import partial
from pathlib import Path
from chainer import cuda
from chainer import optimizers
from chainer import training
from chainer.dataset import convert
from chainer.iterators import MultiprocessIterator
from chainer.training import extensions
from chainerui.utils import save_args
from become_yukarin.config.config import create_from_json
from become_yukarin.dataset import create as create_dataset
from become_yukarin.model.model import create
from become_yukarin.updater.updater import Updater
# Command line: train.py <config_json_path> <output_dir>
parser = argparse.ArgumentParser()
parser.add_argument('config_json_path', type=Path)
parser.add_argument('output', type=Path)
arguments = parser.parse_args()

config = create_from_json(arguments.config_json_path)
arguments.output.mkdir(exist_ok=True)
# keep a copy of the effective config next to the results
config.save_as_json((arguments.output / 'config.json').absolute())

# model
if config.train.gpu >= 0:
    cuda.get_device_from_id(config.train.gpu).use()
predictor, discriminator = create(config.model)
models = {
    'predictor': predictor,
    'discriminator': discriminator,
}

# dataset
dataset = create_dataset(config.dataset)
train_iter = MultiprocessIterator(dataset['train'], config.train.batchsize)
test_iter = MultiprocessIterator(dataset['test'], config.train.batchsize, repeat=False, shuffle=False)
train_eval_iter = MultiprocessIterator(dataset['train_eval'], config.train.batchsize, repeat=False, shuffle=False)


# optimizer
def create_optimizer(model):
    # Adam with beta1=0.5 — presumably chosen for GAN training stability; confirm
    optimizer = optimizers.Adam(alpha=0.0002, beta1=0.5, beta2=0.999)
    optimizer.setup(model)
    return optimizer


opts = {key: create_optimizer(model) for key, model in models.items()}

# updater
converter = partial(convert.concat_examples, padding=0)
updater = Updater(
    loss_config=config.loss,
    predictor=predictor,
    discriminator=discriminator,
    device=config.train.gpu,
    iterator=train_iter,
    optimizer=opts,
    converter=converter,
)

# trainer
trigger_log = (config.train.log_iteration, 'iteration')
trigger_snapshot = (config.train.snapshot_iteration, 'iteration')

trainer = training.Trainer(updater, out=arguments.output)

# evaluate both on held-out data and on a same-sized slice of training data
ext = extensions.Evaluator(test_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
trainer.extend(ext, name='test', trigger=trigger_log)
ext = extensions.Evaluator(train_eval_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
trainer.extend(ext, name='train', trigger=trigger_log)

trainer.extend(extensions.dump_graph('predictor/loss'))

# periodically save only the predictor weights
ext = extensions.snapshot_object(predictor, filename='predictor_{.updater.iteration}.npz')
trainer.extend(ext, trigger=trigger_snapshot)

trainer.extend(extensions.LogReport(trigger=trigger_log))

save_args(arguments, arguments.output)
trainer.run()
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,441 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/__init__.py | from . import config
from . import dataset
from . import param
from .acoustic_converter import AcousticConverter
from .super_resolution import SuperResolution
from .vocoder import RealtimeVocoder
from .vocoder import Vocoder
from .voice_changer import VoiceChanger
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,442 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/updater/__init__.py | from . import sr_updater
from . import updater
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,443 | HamaguchiKazuki/become-yukarin | refs/heads/master | /scripts/launch.py | """
launcher for some task that have diff params
"""
import argparse
import copy
import datetime
import hashlib
import json
import subprocess
import time
from pathlib import Path
# Shell-command template: start one detached `screen` session per task, named
# from the project name and GPU id, then inject the training command into it.
# Placeholders like {project/name} and {train/gpu} are filled from the config
# flattened into slash-joined keys (see make_key_chain in the main loop).
base_command_default = \
    "screen -d -m -S {project/name}_gpu{train/gpu} ;" + \
    "screen -S {project/name}_gpu{train/gpu} -X stuff 'python3 {python_file_path} {recipe_path} {output}\n'"

parser = argparse.ArgumentParser()
parser.add_argument('output_dir', type=Path)  # root directory for per-task outputs
parser.add_argument('--python_file_path', default='train.py')
parser.add_argument('--recipe_json_path', default='recipe/recipe.json')
parser.add_argument('--base_config_json_path', default='recipe/config.json')
parser.add_argument('--base_command', default=base_command_default)
args = parser.parse_args()

# Recipe layout: 'each' maps config keys to per-task value lists,
# 'all' maps config keys to a single value shared by every task.
recipe = json.load(open(args.recipe_json_path, encoding='utf-8'))
recipe_each = recipe['each']
recipe_all = recipe['all']

# Base config that each task's overrides are applied on top of.
base_config = json.load(open(args.base_config_json_path, encoding='utf-8'))
def put_config_value(config, recipe_key, value):
    """Write *value* into nested dict *config* at slash-separated *recipe_key*.

    For example, ``put_config_value(cfg, 'train/gpu', 2)`` performs
    ``cfg['train']['gpu'] = 2``.  Intermediate dicts must already exist;
    *config* is mutated in place.
    """
    *parents, leaf = recipe_key.split('/')
    node = config
    for name in parents:
        node = node[name]
    node[leaf] = value
def _replace_name(dist):
_format = {}
now = datetime.datetime.now()
if '{date}' in dist['project']['name']:
_format['date'] = now.strftime('%Y%m%d%H%M%S')
if '{hash}' in dist['project']['name']:
_format['hash'] = hashlib.md5(bytes(str(now), 'utf')).hexdigest()[:6]
if len(_format) > 0:
dist['project']['name'] = dist['project']['name'].format(**_format)
# One task per index into the 'each' lists; bounded by the shortest list so
# every key has a value for every task.
num_task = min(len(list(value)) for value in recipe_each.values())

command_list = []
for i in range(num_task):
    # Start from a fresh copy so tasks don't see each other's overrides.
    config = copy.deepcopy(base_config)
    for recipe_key in recipe_all.keys():
        put_config_value(config, recipe_key, recipe_all[recipe_key])
    for recipe_key in recipe_each.keys():
        put_config_value(config, recipe_key, recipe_each[recipe_key][i])
    _replace_name(config)

    # add git branch name
    git_branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode("utf-8").strip()
    config['project']['tags'].append('git branch name:' + git_branch)

    # Persist the fully-resolved config; the launched command reads it back.
    made_recipe_path = "{}.{}.json".format(datetime.datetime.now().strftime('%Y%m%d%H%M%S'), i)
    with open(made_recipe_path, 'w', encoding='utf') as f:
        json.dump(config, f, indent=2, sort_keys=True, ensure_ascii=False)

    # Flatten the nested config into slash-joined keys ('train/gpu', ...) so the
    # command template's placeholders can reference any config value.
    def make_key_chain(key_chain, value, dist):
        if not isinstance(value, dict):
            dist['/'.join(key_chain)] = value
        else:
            for key in value.keys():
                make_key_chain(key_chain + [key], value[key], dist)

    dist = {}
    make_key_chain([], config, dist)
    dist['output'] = args.output_dir / config['project']['name']
    dist['python_file_path'] = args.python_file_path
    dist['recipe_path'] = made_recipe_path

    command = args.base_command.format(**dist)
    command_list += [command]
    print(config['project']['name'])

# Launch sequentially with a 1-second gap so timestamp-derived names and
# recipe paths of consecutive tasks don't collide.
for command in command_list:
    time.sleep(1)
    subprocess.check_output(command, shell=True)
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,444 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/model/model.py | import chainer
import chainer.functions as F
import chainer.links as L
from become_yukarin.config.config import ModelConfig
class Convolution1D(chainer.links.ConvolutionND):
    """1-D convolution link: ConvolutionND with ``ndim`` pinned to 1."""

    def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
                 nobias=False, initialW=None, initial_bias=None,
                 cover_all=False) -> None:
        # Collect all parent arguments, then forward in one call.
        parent_kwargs = dict(
            ndim=1,
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=nobias,
            initialW=initialW,
            initial_bias=initial_bias,
            cover_all=cover_all,
        )
        super().__init__(**parent_kwargs)
class Deconvolution1D(chainer.links.DeconvolutionND):
    """1-D transposed convolution link: DeconvolutionND with ``ndim`` pinned to 1."""

    def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
                 nobias=False, outsize=None,
                 initialW=None, initial_bias=None) -> None:
        # Collect all parent arguments, then forward in one call.
        parent_kwargs = dict(
            ndim=1,
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=nobias,
            outsize=outsize,
            initialW=initialW,
            initial_bias=initial_bias,
        )
        super().__init__(**parent_kwargs)
class CBR(chainer.Chain):
    """Conv/deconv + optional BatchNorm + optional dropout + activation block.

    sample: 'down' -> kernel-4 stride-2 convolution (halves length),
            'up'   -> kernel-4 stride-2 deconvolution (doubles length),
            other  -> 1x1 stride-1 convolution (length preserved).
    """

    def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False) -> None:
        super().__init__()
        self.bn = bn
        self.activation = activation
        self.dropout = dropout
        # Weight init used throughout this model family.
        w = chainer.initializers.Normal(0.02)
        with self.init_scope():
            if sample == 'down':
                self.c = Convolution1D(ch0, ch1, 4, 2, 1, initialW=w)
            elif sample == 'up':
                self.c = Deconvolution1D(ch0, ch1, 4, 2, 1, initialW=w)
            else:
                self.c = Convolution1D(ch0, ch1, 1, 1, 0, initialW=w)
            if bn:
                self.batchnorm = L.BatchNormalization(ch1)

    def __call__(self, x):
        # Order: conv -> (BN) -> (dropout, default ratio) -> (activation).
        h = self.c(x)
        if self.bn:
            h = self.batchnorm(h)
        if self.dropout:
            h = F.dropout(h)
        if self.activation is not None:
            h = self.activation(h)
        return h
class Encoder(chainer.Chain):
    """Encoder half of the generator: c0 plus 7 CBR stages.

    Returns ALL intermediate activations so the decoder can use them as
    skip connections.  Stages with index < extensive_layers downsample
    ('down'); later stages keep the time length ('same').
    """

    def __init__(self, in_ch, base=64, extensive_layers=8) -> None:
        super().__init__()
        w = chainer.initializers.Normal(0.02)
        with self.init_scope():
            # c0 is a plain conv (kernel 3 when any extensive layer exists,
            # otherwise 1x1); activation is applied in __call__.
            if extensive_layers > 0:
                self.c0 = Convolution1D(in_ch, base * 1, 3, 1, 1, initialW=w)
            else:
                self.c0 = Convolution1D(in_ch, base * 1, 1, 1, 0, initialW=w)
            _choose = lambda i: 'down' if i < extensive_layers else 'same'
            self.c1 = CBR(base * 1, base * 2, bn=True, sample=_choose(1), activation=F.leaky_relu, dropout=False)
            self.c2 = CBR(base * 2, base * 4, bn=True, sample=_choose(2), activation=F.leaky_relu, dropout=False)
            self.c3 = CBR(base * 4, base * 8, bn=True, sample=_choose(3), activation=F.leaky_relu, dropout=False)
            self.c4 = CBR(base * 8, base * 8, bn=True, sample=_choose(4), activation=F.leaky_relu, dropout=False)
            self.c5 = CBR(base * 8, base * 8, bn=True, sample=_choose(5), activation=F.leaky_relu, dropout=False)
            self.c6 = CBR(base * 8, base * 8, bn=True, sample=_choose(6), activation=F.leaky_relu, dropout=False)
            self.c7 = CBR(base * 8, base * 8, bn=True, sample=_choose(7), activation=F.leaky_relu, dropout=False)

    def __call__(self, x):
        # hs[i] is the output of stage i; the full list feeds the decoder's skips.
        hs = [F.leaky_relu(self.c0(x))]
        for i in range(1, 8):
            hs.append(self['c%d' % i](hs[i - 1]))
        return hs
class Decoder(chainer.Chain):
    """Decoder half of the generator: 7 CBR stages plus a final plain conv.

    Consumes the encoder's activation list; each stage's input is the
    previous output concatenated with the matching encoder activation,
    hence the doubled (base * 16 / base * 8 / base * 4 / base * 2) input
    channels from c1 onward.  The last 'extensive_layers' stages upsample.
    """

    def __init__(self, out_ch, base=64, extensive_layers=8) -> None:
        super().__init__()
        w = chainer.initializers.Normal(0.02)
        with self.init_scope():
            _choose = lambda i: 'up' if i >= 8 - extensive_layers else 'same'
            self.c0 = CBR(base * 8, base * 8, bn=True, sample=_choose(0), activation=F.relu, dropout=True)
            self.c1 = CBR(base * 16, base * 8, bn=True, sample=_choose(1), activation=F.relu, dropout=True)
            self.c2 = CBR(base * 16, base * 8, bn=True, sample=_choose(2), activation=F.relu, dropout=True)
            self.c3 = CBR(base * 16, base * 8, bn=True, sample=_choose(3), activation=F.relu, dropout=False)
            self.c4 = CBR(base * 16, base * 4, bn=True, sample=_choose(4), activation=F.relu, dropout=False)
            self.c5 = CBR(base * 8, base * 2, bn=True, sample=_choose(5), activation=F.relu, dropout=False)
            self.c6 = CBR(base * 4, base * 1, bn=True, sample=_choose(6), activation=F.relu, dropout=False)
            # Final projection to out_ch is a plain conv, no BN/activation.
            if extensive_layers > 0:
                self.c7 = Convolution1D(base * 2, out_ch, 3, 1, 1, initialW=w)
            else:
                self.c7 = Convolution1D(base * 2, out_ch, 1, 1, 0, initialW=w)

    def __call__(self, hs):
        # hs[-1] is the deepest encoder feature; hs[-i-1] is stage i's skip input.
        h = self.c0(hs[-1])
        for i in range(1, 8):
            h = F.concat([h, hs[-i - 1]])
            if i < 7:
                h = self['c%d' % i](h)
            else:
                h = self.c7(h)
        return h
class Predictor(chainer.Chain):
    """Generator network: U-Net-style Encoder feeding a Decoder."""

    def __init__(self, in_ch, out_ch, base=64, extensive_layers=8) -> None:
        super().__init__()
        with self.init_scope():
            self.encoder = Encoder(in_ch, base=base, extensive_layers=extensive_layers)
            self.decoder = Decoder(out_ch, base=base, extensive_layers=extensive_layers)

    def __call__(self, x):
        skips = self.encoder(x)
        return self.decoder(skips)
class Discriminator(chainer.Chain):
    """Conditional discriminator over (input, output) feature pairs.

    Each of the pair is embedded separately (c0_0 / c0_1), concatenated,
    then passed through downsampling CBR stages and a final 1-channel conv.
    is_weak enables dropout in every stage to handicap the discriminator.
    """

    def __init__(self, in_ch, out_ch, base=32, extensive_layers=5, is_weak=False) -> None:
        super().__init__()
        w = chainer.initializers.Normal(0.02)
        with self.init_scope():
            # Stages below 'extensive_layers' downsample; the rest keep length.
            _choose = lambda i: 'down' if i < extensive_layers else 'same'
            self.c0_0 = CBR(in_ch, base * 1, bn=False, sample=_choose(0), activation=F.leaky_relu, dropout=is_weak)
            self.c0_1 = CBR(out_ch, base * 1, bn=False, sample=_choose(0), activation=F.leaky_relu, dropout=is_weak)
            self.c1 = CBR(base * 2, base * 4, bn=True, sample=_choose(1), activation=F.leaky_relu, dropout=is_weak)
            self.c2 = CBR(base * 4, base * 8, bn=True, sample=_choose(2), activation=F.leaky_relu, dropout=is_weak)
            self.c3 = CBR(base * 8, base * 16, bn=True, sample=_choose(3), activation=F.leaky_relu, dropout=is_weak)
            if extensive_layers > 4:
                self.c4 = Convolution1D(base * 16, 1, 3, 1, 1, initialW=w)
            else:
                self.c4 = Convolution1D(base * 16, 1, 1, 1, 0, initialW=w)

    def __call__(self, x_0, x_1):
        # Raw (unsquashed) scores are returned; losses apply softplus themselves.
        h = F.concat([self.c0_0(x_0), self.c0_1(x_1)])
        h = self.c1(h)
        h = self.c2(h)
        h = self.c3(h)
        h = self.c4(h)
        # h = F.average_pooling_2d(h, h.data.shape[2], 1, 0)
        return h
def create_predictor(config: ModelConfig):
    """Build the generator (Predictor) from a ModelConfig."""
    return Predictor(
        config.in_channels,
        config.out_channels,
        base=config.generator_base_channels,
        extensive_layers=config.generator_extensive_layers,
    )
def create_discriminator(config: ModelConfig):
    """Build the Discriminator from a ModelConfig."""
    return Discriminator(
        config.in_channels,
        config.out_channels,
        base=config.discriminator_base_channels,
        extensive_layers=config.discriminator_extensive_layers,
        is_weak=config.weak_discriminator,
    )
def create(config: ModelConfig):
    """Return the (predictor, discriminator) pair built from *config*."""
    return create_predictor(config), create_discriminator(config)
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,445 | HamaguchiKazuki/become-yukarin | refs/heads/master | /setup.py | from setuptools import setup, find_packages
# setuptools packaging metadata for the become_yukarin library.
setup(
    name='become_yukarin',
    version='1.0.0',
    packages=find_packages(),
    url='https://github.com/Hiroshiba/become-yukarin',
    author='Kazuyuki Hiroshiba',
    author_email='hihokaruta@gmail.com',
    description='become Yuduki Yukari with DeepLearning power.',
    license='MIT License',
    # Runtime dependencies: DL framework (chainer), audio feature extraction
    # (librosa/pysptk/pyworld), DTW alignment (fastdtw), training UI (chainerui).
    install_requires=[
        'numpy',
        'chainer',
        'librosa',
        'pysptk',
        'pyworld',
        'fastdtw',
        'chainerui',
    ],
    classifiers=[
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: MIT License',
    ]
)
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,446 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/model/cbhg_model.py | from functools import partial
from typing import List
import chainer
from become_yukarin.config.old_config import CBHGDiscriminatorModelConfig
from become_yukarin.config.old_config import CBHGModelConfig
class Convolution1D(chainer.links.ConvolutionND):
    """1-D convolution link: ConvolutionND with ``ndim`` fixed at 1."""

    def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
                 nobias=False, initialW=None, initial_bias=None,
                 cover_all=False):
        # Gather the parent's arguments, then delegate in a single call.
        parent_kwargs = dict(
            ndim=1,
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=nobias,
            initialW=initialW,
            initial_bias=initial_bias,
            cover_all=cover_all,
        )
        super().__init__(**parent_kwargs)
class LegacyConvolution1D(chainer.links.Convolution2D):
    """1-D convolution emulated via Convolution2D over a trailing size-1 axis.

    Inputs must carry a final dummy dimension of length 1 (checked in
    __call__).  Only scalar ksize/stride/pad are supported; each scalar is
    lifted to a (value, 1) or (value, 0) pair so the dummy axis is untouched.
    """

    def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
                 nobias=False, initialW=None, initial_bias=None, **kwargs):
        assert ksize is None or isinstance(ksize, int)
        assert isinstance(stride, int)
        assert isinstance(pad, int)
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=(ksize, 1),
            stride=(stride, 1),
            pad=(pad, 0),
            nobias=nobias,
            initialW=initialW,
            initial_bias=initial_bias,
            **kwargs,
        )

    def __call__(self, x):
        # The trailing axis must be the dummy dimension introduced by callers.
        assert x.shape[-1] == 1
        return super().__call__(x)
class ConvHighway(chainer.link.Chain):
    """Highway layer built from 1x1 convolutions.

    y = act(plain(x)) * T + x * (1 - T), where T = sigmoid(transform(x)).
    The transform-gate bias defaults to -1 so the gate initially favors the
    identity (carry) path.
    """

    def __init__(self, in_out_size, nobias=False, activate=chainer.functions.relu,
                 init_Wh=None, init_Wt=None, init_bh=None, init_bt=-1):
        super().__init__()
        self.activate = activate
        with self.init_scope():
            # 'plain' produces the candidate activation H(x).
            self.plain = Convolution1D(
                in_out_size, in_out_size, 1, nobias=nobias,
                initialW=init_Wh, initial_bias=init_bh)
            # 'transform' produces the gate logits T(x) before the sigmoid.
            self.transform = Convolution1D(
                in_out_size, in_out_size, 1, nobias=nobias,
                initialW=init_Wt, initial_bias=init_bt)

    def __call__(self, x):
        out_plain = self.activate(self.plain(x))
        out_transform = chainer.functions.sigmoid(self.transform(x))
        # Gated blend of transformed and original input (channels must match).
        y = out_plain * out_transform + x * (1 - out_transform)
        return y
class PreNet(chainer.link.Chain):
    """Two 1x1 convolutions, each followed by ReLU and dropout(0.5).

    Maps (batch, in_channels, time) -> (batch, out_channels, time); time
    length is preserved (1x1 kernels).
    """

    def __init__(self, in_channels: int, hidden_channels: int, out_channels: int) -> None:
        super().__init__()
        with self.init_scope():
            self.conv1 = Convolution1D(in_channels, hidden_channels, 1)
            self.conv2 = Convolution1D(hidden_channels, out_channels, 1)

    def __call__(self, x):
        # BUG FIX: the original wrote dropout((relu(...), 0.5)) — the extra
        # parentheses passed a *tuple* as the sole argument to F.dropout and
        # silently discarded the 0.5 ratio, raising at runtime.  The intended
        # call is dropout(h, 0.5) after each ReLU.
        h = x
        h = chainer.functions.dropout(chainer.functions.relu(self.conv1(h)), 0.5)
        h = chainer.functions.dropout(chainer.functions.relu(self.conv2(h)), 0.5)
        return h
class Conv1DBank(chainer.link.Chain):
    """Bank of k parallel convolutions with kernel sizes 1..k, concatenated.

    Each conv input is padded to 'same' length (asymmetrically for even
    kernels: i//2 left, (i+1)//2 right for kernel i+1), so the time axis is
    preserved.  Output channels = out_channels * k, then BatchNorm + ReLU.
    """

    def __init__(self, in_channels: int, out_channels: int, k: int) -> None:
        super().__init__()
        self.stacked_channels = out_channels * k
        # pads[i] pre-pads for the kernel of size i+1 in convs[i].
        self.pads = [
            partial(chainer.functions.pad, pad_width=((0, 0), (0, 0), (i // 2, (i + 1) // 2)), mode='constant')
            for i in range(k)
        ]
        with self.init_scope():
            self.convs = chainer.link.ChainList(
                *(Convolution1D(in_channels, out_channels, i + 1, nobias=True) for i in range(k))
            )
            self.bn = chainer.links.BatchNormalization(out_channels * k)

    def __call__(self, x):
        h = x
        # Channel-wise concat of every kernel size's response.
        h = chainer.functions.concat([conv(pad(h)) for pad, conv in zip(self.pads, self.convs)])
        h = chainer.functions.relu(self.bn(h))
        return h
class Conv1DProjections(chainer.link.Chain):
    """Two kernel-3 conv + BatchNorm + ReLU layers (CBHG projection stage)."""

    def __init__(self, in_channels: int, hidden_channels: int, out_channels: int) -> None:
        super().__init__()
        with self.init_scope():
            self.conv1 = Convolution1D(in_channels, hidden_channels, 3, pad=1, nobias=True)
            self.bn1 = chainer.links.BatchNormalization(hidden_channels)
            self.conv2 = Convolution1D(hidden_channels, out_channels, 3, pad=1, nobias=True)
            self.bn2 = chainer.links.BatchNormalization(out_channels)

    def __call__(self, x):
        relu = chainer.functions.relu
        hidden = relu(self.bn1(self.conv1(x)))
        return relu(self.bn2(self.conv2(hidden)))
class CBHG(chainer.link.Chain):
    """CBHG module: conv bank -> max-pool -> projections (+residual) -> highways -> optional BiGRU.

    out_size is out_channels, doubled when the bidirectional GRU is enabled
    (its forward/backward outputs are concatenated).
    """

    def __init__(
            self,
            in_channels: int,
            conv_bank_out_channels: int,
            conv_bank_k: int,
            max_pooling_k: int,
            conv_projections_hidden_channels: int,
            highway_layers: int,
            out_channels: int,
            disable_last_rnn: bool,
    ) -> None:
        super().__init__()
        # 'same' padding for the max-pool so the time axis is preserved.
        self.max_pooling_padding = partial(
            chainer.functions.pad,
            pad_width=((0, 0), (0, 0), ((max_pooling_k - 1) // 2, max_pooling_k // 2)),
            mode='constant',
        )
        self.max_pooling = chainer.functions.MaxPoolingND(1, max_pooling_k, 1, cover_all=False)
        self.out_size = out_channels * (1 if disable_last_rnn else 2)
        with self.init_scope():
            self.conv_bank = Conv1DBank(
                in_channels=in_channels,
                out_channels=conv_bank_out_channels,
                k=conv_bank_k,
            )
            # NOTE: attribute name keeps the original's 'projectoins' spelling;
            # renaming would break saved model snapshots.
            self.conv_projectoins = Conv1DProjections(
                in_channels=self.conv_bank.stacked_channels,
                hidden_channels=conv_projections_hidden_channels,
                out_channels=out_channels,
            )
            self.highways = chainer.link.ChainList(
                *([ConvHighway(out_channels) for _ in range(highway_layers)])
            )
            if not disable_last_rnn:
                self.gru = chainer.links.NStepBiGRU(
                    n_layers=1,
                    in_size=out_channels,
                    out_size=out_channels,
                    dropout=0.0,
                )

    def __call__(self, x):
        h = x
        h = self.conv_bank(h)
        h = self.max_pooling(self.max_pooling_padding(h))
        h = self.conv_projectoins(h)
        # Residual connection: requires out_channels == in_channels.
        h = h + x
        for highway in self.highways:
            h = highway(h)
        if hasattr(self, 'gru'):
            # NStepBiGRU wants a per-sample list of (time, channel) arrays;
            # transpose/separate before, stack/transpose back after.
            h = chainer.functions.separate(chainer.functions.transpose(h, axes=(0, 2, 1)))
            _, h = self.gru(None, h)
            h = chainer.functions.transpose(chainer.functions.stack(h), axes=(0, 2, 1))
        return h
class Predictor(chainer.link.Chain):
    """Wraps a feature network with a final 1x1 convolution to out_size channels."""

    def __init__(self, network, out_size: int) -> None:
        super().__init__()
        with self.init_scope():
            self.network = network
            self.last = Convolution1D(network.out_size, out_size, 1)

    def __call__(self, x):
        return self.last(self.network(x))
class Aligner(chainer.link.Chain):
    """Soft time alignment: maps (batch, channel, timeA) to a fixed timeB length.

    A BiGRU summarizes the sequence, a 1x1 conv emits per-frame alignment
    logits, and a softmax over channels yields attention weights that mix
    the original frames.
    """

    def __init__(self, in_size: int, out_time_length: int) -> None:
        super().__init__()
        with self.init_scope():
            self.gru = chainer.links.NStepBiGRU(
                n_layers=1,
                in_size=in_size,
                out_size=in_size // 2,
                dropout=0.0,
            )
            # BiGRU concatenates both directions, hence in_size // 2 * 2 inputs.
            self.last = Convolution1D(in_size // 2 * 2, out_time_length, 1)

    def __call__(self, x):
        """
        :param x: (batch, channel, timeA)
        """
        h = x
        h = chainer.functions.separate(chainer.functions.transpose(h, axes=(0, 2, 1)))  # h: batch * (timeA, channel)
        _, h = self.gru(None, h)  # h: batch * (timeA, ?)
        h = chainer.functions.transpose(chainer.functions.stack(h), axes=(0, 2, 1))  # h: (batch, ?, timeA)
        h = chainer.functions.softmax(self.last(h), axis=1)  # h: (batch, timeB, timeA)
        h = chainer.functions.matmul(x, h)  # h: (batch, channel, time)
        return h
class Discriminator(chainer.link.Chain):
    """Stack of stride-2 legacy 1-D convolutions ending in a 1-channel score map.

    Uses LegacyConvolution1D, so a trailing dummy axis is added before the
    convolutions and stripped afterwards.
    """

    def __init__(self, in_channels: int, hidden_channels_list: List[int]) -> None:
        super().__init__()
        with self.init_scope():
            # Chain channel sizes: in_channels -> hidden[0] -> ... -> hidden[-1].
            self.convs = chainer.link.ChainList(*(
                LegacyConvolution1D(i_c, o_c, ksize=2, stride=2)
                for i_c, o_c in zip([in_channels] + hidden_channels_list[:-1], hidden_channels_list)
            ))
            self.last_conv = LegacyConvolution1D(hidden_channels_list[-1], 1, ksize=1)

    def __call__(self, x):
        """
        :param x: (batch, channel, time)
        """
        h = x
        # Append the dummy axis LegacyConvolution1D requires.
        h = chainer.functions.reshape(h, h.shape + (1,))
        for conv in self.convs.children():
            h = chainer.functions.relu(conv(h))
        h = self.last_conv(h)
        # Drop the dummy axis again: (batch, 1, time', 1) -> (batch, 1, time').
        h = chainer.functions.reshape(h, h.shape[:-1])
        return h
def create_predictor(config: CBHGModelConfig):
    """Build the CBHG feature network and wrap it in a Predictor head."""
    cbhg = CBHG(
        in_channels=config.in_channels,
        conv_bank_out_channels=config.conv_bank_out_channels,
        conv_bank_k=config.conv_bank_k,
        max_pooling_k=config.max_pooling_k,
        conv_projections_hidden_channels=config.conv_projections_hidden_channels,
        highway_layers=config.highway_layers,
        out_channels=config.out_channels,
        disable_last_rnn=config.disable_last_rnn,
    )
    return Predictor(network=cbhg, out_size=config.out_size)
def create_aligner(config: CBHGModelConfig):
    """Build the Aligner; *config.enable_aligner* must be set."""
    assert config.enable_aligner
    return Aligner(
        in_size=config.in_channels,
        out_time_length=config.aligner_out_time_length,
    )
def create_discriminator(config: CBHGDiscriminatorModelConfig):
    """Build the Discriminator from its dedicated config section."""
    return Discriminator(
        in_channels=config.in_channels,
        hidden_channels_list=config.hidden_channels_list,
    )
def create(config: CBHGModelConfig):
    """Return (predictor, aligner-or-None, discriminator-or-None) built from *config*."""
    predictor = create_predictor(config)
    aligner = create_aligner(config) if config.enable_aligner else None
    if config.discriminator is None:
        discriminator = None
    else:
        discriminator = create_discriminator(config.discriminator)
    return predictor, aligner, discriminator
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,447 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/updater/updater.py | import chainer
import chainer.functions as F
from become_yukarin.config.config import LossConfig
from become_yukarin.model.model import Discriminator
from become_yukarin.model.model import Predictor
class Updater(chainer.training.StandardUpdater):
    def __init__(
            self,
            loss_config: LossConfig,
            predictor: Predictor,
            discriminator: Discriminator,
            *args,
            **kwargs,
    ) -> None:
        """GAN updater holding the generator (predictor) and discriminator.

        Remaining *args/**kwargs are forwarded to chainer's StandardUpdater
        (iterator, optimizers, converter, device, ...).
        """
        super().__init__(*args, **kwargs)
        self.loss_config = loss_config
        self.predictor = predictor
        self.discriminator = discriminator
def _loss_predictor(self, predictor, output, target, d_fake):
b, _, t = d_fake.data.shape
loss_mse = (F.mean_absolute_error(output, target))
chainer.report({'mse': loss_mse}, predictor)
loss_adv = F.sum(F.softplus(-d_fake)) / (b * t)
chainer.report({'adversarial': loss_adv}, predictor)
loss = self.loss_config.mse * loss_mse + self.loss_config.adversarial * loss_adv
chainer.report({'loss': loss}, predictor)
return loss
def _loss_discriminator(self, discriminator, d_real, d_fake):
b, _, t = d_real.data.shape
loss_real = F.sum(F.softplus(-d_real)) / (b * t)
chainer.report({'real': loss_real}, discriminator)
loss_fake = F.sum(F.softplus(d_fake)) / (b * t)
chainer.report({'fake': loss_fake}, discriminator)
loss = loss_real + loss_fake
chainer.report({'loss': loss}, discriminator)
tp = (d_real.data > 0.5).sum()
fp = (d_fake.data > 0.5).sum()
fn = (d_real.data <= 0.5).sum()
tn = (d_fake.data <= 0.5).sum()
accuracy = (tp + tn) / (tp + fp + fn + tn)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
chainer.report({'accuracy': accuracy}, self.discriminator)
chainer.report({'precision': precision}, self.discriminator)
chainer.report({'recall': recall}, self.discriminator)
return loss
def forward(self, input, target, mask):
input = chainer.as_variable(input)
target = chainer.as_variable(target)
mask = chainer.as_variable(mask)
output = self.predictor(input)
output = output * mask
target = target * mask
d_fake = self.discriminator(input, output)
d_real = self.discriminator(input, target)
loss = {
'predictor': self._loss_predictor(self.predictor, output, target, d_fake),
'discriminator': self._loss_discriminator(self.discriminator, d_real, d_fake),
}
return loss
def update_core(self):
opt_predictor = self.get_optimizer('predictor')
opt_discriminator = self.get_optimizer('discriminator')
batch = self.get_iterator('main').next()
batch = self.converter(batch, self.device)
loss = self.forward(**batch)
opt_predictor.update(loss.get, 'predictor')
opt_discriminator.update(loss.get, 'discriminator')
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,448 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/config/__init__.py | from . import config
from . import sr_config
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,449 | HamaguchiKazuki/become-yukarin | refs/heads/master | /scripts/ln_jnas_subset.py | import argparse
import multiprocessing
from pathlib import Path
from jnas_metadata_loader import load_from_directory
from jnas_metadata_loader.jnas_metadata import JnasMetadata
# Symlink the JNAS ATR 'B' subset wav files into a flat output directory,
# renaming each link according to --format.
parser = argparse.ArgumentParser()
parser.add_argument('jnas', type=Path)
parser.add_argument('output', type=Path)
parser.add_argument('--format', default='{sex}{text_id}_{mic}_atr_{subset}{sen_id}.wav')
argument = parser.parse_args()
jnas = argument.jnas # type: Path
output = argument.output # type: Path
# Load the JNAS metadata index and keep only the ATR subset 'B' entries.
jnas_list = load_from_directory(str(jnas))
atr_list = jnas_list.subset_news_or_atr('B')
output.mkdir(exist_ok=True)
def process(d: JnasMetadata):
    # Create a symlink named by formatting the metadata fields into --format.
    # NOTE(review): symlink_to raises FileExistsError if the link already exists.
    p = d.path
    out = output / argument.format.format(**d._asdict())
    out.symlink_to(p)
# NOTE(review): pool creation and map run at module top level without an
# `if __name__ == '__main__':` guard -- this breaks on spawn-start platforms
# (Windows/macOS default); works with fork-start only.
pool = multiprocessing.Pool()
pool.map(process, atr_list)
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,450 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/config/old_config.py | from typing import List
from typing import NamedTuple
from typing import Optional
class CBHGDiscriminatorModelConfig(NamedTuple):
    """Discriminator settings for the legacy CBHG model (see model/cbhg_model.py)."""
    in_channels: int  # number of input channels
    hidden_channels_list: List[int]  # output channels of each hidden layer, in order
class CBHGModelConfig(NamedTuple):
    """Hyper-parameters for the legacy CBHG model.

    Field meanings follow the CBHG module naming (conv bank, max pooling,
    conv projections, highway layers) -- presumably mirroring the Tacotron
    CBHG structure; verify against model/cbhg_model.py.
    """
    in_channels: int  # input channel count
    conv_bank_out_channels: int  # channels produced by the convolution bank
    conv_bank_k: int  # number of kernel sizes (1..k) in the convolution bank
    max_pooling_k: int  # max-pooling kernel size
    conv_projections_hidden_channels: int  # hidden channels of the projection convs
    highway_layers: int  # number of highway layers
    out_channels: int  # output channel count
    out_size: int  # final output size
    aligner_out_time_length: int  # time length produced by the aligner (if enabled)
    disable_last_rnn: bool  # skip the final RNN stage when True
    enable_aligner: bool  # whether the aligner sub-network is used
    discriminator: Optional[CBHGDiscriminatorModelConfig]  # None disables adversarial training
class CBHGLossConfig(NamedTuple):
    """Loss-term weights for legacy CBHG training."""
    l1: float  # weight of the L1 reconstruction term
    predictor_fake: float  # weight of the generator's adversarial term
    discriminator_true: float  # weight of the discriminator's real-sample term
    discriminator_fake: float  # weight of the discriminator's fake-sample term
    discriminator_grad: float  # weight of the discriminator's gradient term
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,451 | HamaguchiKazuki/become-yukarin | refs/heads/master | /tests/test_dataset.py | import unittest
import numpy
from become_yukarin.dataset import dataset
class TestDataset(unittest.TestCase):
    """Round-trip tests for dataset.EncodeFeatureProcess / DecodeFeatureProcess."""

    def setUp(self):
        # Fixture dimensions shared by all tests.
        self.sample_rate = 24000
        self.len_time = 100
        self.fft_size = 1024
        self.order = 59
        t = self.len_time
        spectrum_size = self.fft_size // 2 + 1
        # A deterministic dummy feature: ramps for the float fields and an
        # alternating boolean mask for `voiced`.
        self.dummy_feature = dataset.AcousticFeature(
            f0=numpy.arange(t).reshape((t, -1)),
            spectrogram=numpy.arange(t * spectrum_size).reshape((t, -1)),
            aperiodicity=numpy.arange(t * spectrum_size).reshape((t, -1)),
            mfcc=numpy.arange(t * (self.order + 1)).reshape((t, -1)),
            voiced=(numpy.arange(t) % 2 == 1).reshape((t, -1)),
        )
        self.feature_sizes = dataset.AcousticFeature.get_sizes(
            sampling_rate=self.sample_rate,
            order=self.order,
        )

    def _encode(self, names):
        """Encode the dummy feature into a single array using *names*."""
        return dataset.EncodeFeatureProcess(names)(self.dummy_feature, test=True)

    def _decode(self, names, encoded):
        """Decode *encoded* back into an AcousticFeature using *names*."""
        return dataset.DecodeFeatureProcess(names, self.feature_sizes)(encoded, test=True)

    @staticmethod
    def _same(a, b):
        """True iff the two arrays are element-wise identical."""
        return numpy.all(a == b)

    def test_encode_decode_feature(self):
        decoded = self._decode(['mfcc'], self._encode(['mfcc']))
        self.assertTrue(self._same(self.dummy_feature.mfcc, decoded.mfcc))

    def test_encode_decode_feature2(self):
        names = ['mfcc', 'f0']
        decoded = self._decode(names, self._encode(names))
        self.assertTrue(self._same(self.dummy_feature.mfcc, decoded.mfcc))
        self.assertTrue(self._same(self.dummy_feature.f0, decoded.f0))

    def test_encode_decode_feature3(self):
        names = ['mfcc', 'f0']
        encoded = self._encode(names)
        # Corrupt the first encoded row; it should affect mfcc but not f0.
        encoded[0] = numpy.nan
        decoded = self._decode(names, encoded)
        self.assertFalse(self._same(self.dummy_feature.mfcc, decoded.mfcc))
        self.assertTrue(self._same(self.dummy_feature.f0, decoded.f0))
# Allow running this test module directly (e.g. `python tests/test_dataset.py`).
if __name__ == '__main__':
    unittest.main()
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,452 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/acoustic_converter.py | from functools import partial
from pathlib import Path
from typing import Optional
import chainer
import numpy
import pysptk
import pyworld
from become_yukarin.config.config import Config
from become_yukarin.data_struct import AcousticFeature
from become_yukarin.data_struct import Wave
from become_yukarin.dataset.dataset import AcousticFeatureDenormalizeProcess
from become_yukarin.dataset.dataset import AcousticFeatureLoadProcess
from become_yukarin.dataset.dataset import AcousticFeatureNormalizeProcess
from become_yukarin.dataset.dataset import AcousticFeatureProcess
from become_yukarin.dataset.dataset import DecodeFeatureProcess
from become_yukarin.dataset.dataset import EncodeFeatureProcess
from become_yukarin.dataset.dataset import WaveFileLoadProcess
from become_yukarin.model.model import create_predictor
class AcousticConverter(object):
    """Converts one speaker's acoustic features into another's using a
    trained predictor network.

    Pipeline (``convert_to_feature``): normalize -> encode into a 2-D array
    -> pad time axis to a multiple of 128 -> network forward -> decode ->
    denormalize -> rebuild spectrogram from the converted mel-cepstrum.
    ``convert_from_*`` wrap this and synthesize a waveform with WORLD.
    """
    def __init__(self, config: Config, model_path: Path, gpu: Optional[int] = None) -> None:
        """Load the predictor weights and build all pre/post-processing steps.

        :param config: full training/dataset configuration
        :param model_path: path to the serialized (npz) predictor weights
        :param gpu: GPU device id; None keeps the model on CPU
        """
        self.config = config
        self.model_path = model_path
        self.gpu = gpu
        self.model = model = create_predictor(config.model)
        chainer.serializers.load_npz(str(model_path), model)
        if self.gpu is not None:
            model.to_gpu(self.gpu)
        self._param = param = config.dataset.param
        # Waveform loader; top_db=None disables silence trimming here.
        self._wave_process = WaveFileLoadProcess(
            sample_rate=param.voice_param.sample_rate,
            top_db=None,
        )
        # Raw-wave -> acoustic feature (f0 / spectrogram / aperiodicity / mfcc).
        self._feature_process = AcousticFeatureProcess(
            frame_period=param.acoustic_feature_param.frame_period,
            order=param.acoustic_feature_param.order,
            alpha=param.acoustic_feature_param.alpha,
            f0_estimating_method=param.acoustic_feature_param.f0_estimating_method,
        )
        # Load normalization statistics computed at dataset-creation time.
        self._acoustic_feature_load_process = acoustic_feature_load_process = AcousticFeatureLoadProcess()
        input_mean = acoustic_feature_load_process(config.dataset.input_mean_path, test=True)
        input_var = acoustic_feature_load_process(config.dataset.input_var_path, test=True)
        target_mean = acoustic_feature_load_process(config.dataset.target_mean_path, test=True)
        target_var = acoustic_feature_load_process(config.dataset.target_var_path, test=True)
        # Input features are normalized with the *source* speaker statistics;
        # network output is denormalized with the *target* speaker statistics.
        self._feature_normalize = AcousticFeatureNormalizeProcess(
            mean=input_mean,
            var=input_var,
        )
        self._feature_denormalize = AcousticFeatureDenormalizeProcess(
            mean=target_mean,
            var=target_var,
        )
        feature_sizes = AcousticFeature.get_sizes(
            sampling_rate=param.voice_param.sample_rate,
            order=param.acoustic_feature_param.order,
        )
        self._encode_feature = EncodeFeatureProcess(config.dataset.features)
        self._decode_feature = DecodeFeatureProcess(config.dataset.features, feature_sizes)
    def convert_to_feature(self, input: AcousticFeature, out_sampling_rate: Optional[int] = None):
        """Convert *input* features and return the converted AcousticFeature.

        The returned feature keeps the input's aperiodicity and rebuilds the
        spectrogram from the converted mel-cepstrum via pysptk.mc2sp.
        """
        if out_sampling_rate is None:
            out_sampling_rate = self.config.dataset.param.voice_param.sample_rate
        input_feature = input
        input = self._feature_normalize(input, test=True)
        input = self._encode_feature(input, test=True)
        # Pad the time axis (axis 1) up to a multiple of 128 for the network.
        # When the length is already a multiple of 128, a full extra block of
        # 128 is added (and stripped again below), which is harmless.
        pad = 128 - input.shape[1] % 128
        input = numpy.pad(input, [(0, 0), (0, pad)], mode='minimum')
        converter = partial(chainer.dataset.convert.concat_examples, device=self.gpu, padding=0)
        inputs = converter([input])
        with chainer.using_config('train', False):
            out = self.model(inputs).data[0]
        if self.gpu is not None:
            out = chainer.cuda.to_cpu(out)
        # Remove the time padding added above (pad is always >= 1 here).
        out = out[:, :-pad]
        out = self._decode_feature(out, test=True)
        # The network does not predict voiced/unvoiced; reuse the input's flag.
        out = AcousticFeature(
            f0=out.f0,
            spectrogram=out.spectrogram,
            aperiodicity=out.aperiodicity,
            mfcc=out.mfcc,
            voiced=input_feature.voiced,
        )
        out = self._feature_denormalize(out, test=True)
        # Keep the (un-converted) input aperiodicity for synthesis.
        out = AcousticFeature(
            f0=out.f0,
            spectrogram=out.spectrogram,
            aperiodicity=input_feature.aperiodicity,
            mfcc=out.mfcc,
            voiced=out.voiced,
        )
        # Reconstruct a full spectrogram from the converted mel-cepstrum.
        fftlen = pyworld.get_cheaptrick_fft_size(out_sampling_rate)
        spectrogram = pysptk.mc2sp(
            out.mfcc,
            alpha=self._param.acoustic_feature_param.alpha,
            fftlen=fftlen,
        )
        # WORLD synthesis requires float64 arrays.
        out = AcousticFeature(
            f0=out.f0,
            spectrogram=spectrogram,
            aperiodicity=out.aperiodicity,
            mfcc=out.mfcc,
            voiced=out.voiced,
        ).astype(numpy.float64)
        return out
    def convert_from_audio_path(self, path: Path, out_sampling_rate: Optional[int] = None):
        """Load a wav file, extract features, convert, and synthesize a Wave."""
        wave = self._wave_process(str(path), test=True)
        feature = self._feature_process(wave, test=True)
        return self.convert_from_feature(feature, out_sampling_rate)
    def convert_from_feature_path(self, path: Path, out_sampling_rate: Optional[int] = None):
        """Load a saved AcousticFeature from disk, convert, and synthesize a Wave."""
        feature = self._acoustic_feature_load_process(path, test=True)
        return self.convert_from_feature(feature, out_sampling_rate)
    def convert_from_feature(self, input: AcousticFeature, out_sampling_rate: Optional[int] = None):
        """Convert *input* and synthesize the waveform with pyworld.synthesize."""
        if out_sampling_rate is None:
            out_sampling_rate = self.config.dataset.param.voice_param.sample_rate
        out = self.convert_to_feature(input=input, out_sampling_rate=out_sampling_rate)
        out = pyworld.synthesize(
            f0=out.f0.ravel(),
            spectrogram=out.spectrogram,
            aperiodicity=out.aperiodicity,
            fs=out_sampling_rate,
            frame_period=self._param.acoustic_feature_param.frame_period,
        )
        return Wave(out, sampling_rate=out_sampling_rate)
    def __call__(self, voice_path: Path, out_sampling_rate: Optional[int] = None):
        """Alias for convert_from_audio_path."""
        return self.convert_from_audio_path(voice_path, out_sampling_rate)
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,453 | HamaguchiKazuki/become-yukarin | refs/heads/master | /scripts/extract_spectrogram_pair.py | """
extract low and high quality spectrogram data.
"""
import argparse
import multiprocessing
from pathlib import Path
from pprint import pprint
import numpy
import pysptk
import pyworld
from tqdm import tqdm
from become_yukarin.dataset.dataset import AcousticFeatureProcess
from become_yukarin.dataset.dataset import WaveFileLoadProcess
from become_yukarin.param import AcousticFeatureParam
from become_yukarin.param import VoiceParam
# Default parameter bundles; the argparse defaults mirror these values.
base_voice_param = VoiceParam()
base_acoustic_feature_param = AcousticFeatureParam()

parser = argparse.ArgumentParser()
parser.add_argument('--input_directory', '-i', type=Path)
parser.add_argument('--output_directory', '-o', type=Path)
parser.add_argument('--sample_rate', type=int, default=base_voice_param.sample_rate)
parser.add_argument('--top_db', type=float, default=base_voice_param.top_db)
parser.add_argument('--pad_second', type=float, default=base_voice_param.pad_second)
parser.add_argument('--frame_period', type=int, default=base_acoustic_feature_param.frame_period)
parser.add_argument('--order', type=int, default=base_acoustic_feature_param.order)
parser.add_argument('--alpha', type=float, default=base_acoustic_feature_param.alpha)
parser.add_argument('--f0_estimating_method', default=base_acoustic_feature_param.f0_estimating_method)
parser.add_argument('--enable_overwrite', action='store_true')
arguments = parser.parse_args()

# Echo the parsed options for debugging.  The original printed
# `dir(arguments)`, which lists only attribute *names* (including dunders);
# `vars(arguments)` shows the actual name -> value mapping.
pprint(vars(arguments))
def generate_file(path):
    """Extract a (low, high) spectrogram pair from one wav file and save it
    as ``<stem>.npy`` in the output directory.

    Reads all settings from the module-level ``arguments`` namespace so it
    can be mapped over a multiprocessing pool.
    """
    out = Path(arguments.output_directory, path.stem + '.npy')
    # Skip files that were already processed unless overwriting is enabled.
    if out.exists() and not arguments.enable_overwrite:
        return
    # Load the waveform (resampled, trimmed, padded per the arguments).
    wave_file_load_process = WaveFileLoadProcess(
        sample_rate=arguments.sample_rate,
        top_db=arguments.top_db,
        pad_second=arguments.pad_second,
    )
    wave = wave_file_load_process(path, test=True)
    # Extract acoustic features; float fields are cast to float32 to save space.
    acoustic_feature_process = AcousticFeatureProcess(
        frame_period=arguments.frame_period,
        order=arguments.order,
        alpha=arguments.alpha,
        f0_estimating_method=arguments.f0_estimating_method,
    )
    feature = acoustic_feature_process(wave, test=True).astype_only_float(numpy.float32)
    # "High" quality: the full extracted spectrogram.
    high_spectrogram = feature.spectrogram
    # "Low" quality: spectrogram reconstructed from the mel-cepstrum only.
    fftlen = pyworld.get_cheaptrick_fft_size(arguments.sample_rate)
    low_spectrogram = pysptk.mc2sp(
        feature.mfcc,
        alpha=arguments.alpha,
        fftlen=fftlen,
    )
    # Save both as a pickled dict inside an object-dtype npy file.
    numpy.save(out.absolute(), {
        'low': low_spectrogram,
        'high': high_spectrogram,
    })
def main():
    """Process every ``*.wav`` in the input directory in parallel."""
    wav_paths = sorted(arguments.input_directory.glob('*.wav'))
    arguments.output_directory.mkdir(exist_ok=True)
    worker_pool = multiprocessing.Pool()
    # Drain the lazy imap iterator through tqdm to show a progress bar.
    for _ in tqdm(worker_pool.imap(generate_file, wav_paths), total=len(wav_paths)):
        pass


if __name__ == '__main__':
    main()
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,454 | HamaguchiKazuki/become-yukarin | refs/heads/master | /become_yukarin/param.py | from typing import NamedTuple
class VoiceParam(NamedTuple):
    """Waveform-loading parameters."""
    sample_rate: int = 22050  # target sampling rate in Hz
    # Silence-trimming threshold passed to WaveFileLoadProcess; None disables it.
    # NOTE(review): annotated `float` but defaults to None -- should be Optional[float].
    top_db: float = None
    pad_second: float = 0.0  # seconds of padding added to the waveform
class AcousticFeatureParam(NamedTuple):
    """Acoustic feature extraction parameters."""
    frame_period: int = 5  # analysis frame period (WORLD convention: milliseconds -- TODO confirm)
    order: int = 8  # mel-cepstrum order
    alpha: float = 0.466  # all-pass constant used by pysptk mel-cepstrum conversion
    f0_estimating_method: str = 'harvest'  # dio / harvest
class Param(NamedTuple):
    """Top-level bundle of voice and acoustic-feature parameters."""
    voice_param: VoiceParam = VoiceParam()
    acoustic_feature_param: AcousticFeatureParam = AcousticFeatureParam()
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
67,455 | HamaguchiKazuki/become-yukarin | refs/heads/master | /scripts/extract_acoustic_feature.py | """
extract alignments voices.
"""
import argparse
import multiprocessing
from pathlib import Path
from pprint import pprint
import numpy
from become_yukarin.acoustic_converter import AcousticConverter
from become_yukarin.config.config import create_from_json as create_config
from become_yukarin.data_struct import AcousticFeature
from become_yukarin.dataset.dataset import AcousticFeatureLoadProcess
from become_yukarin.dataset.dataset import AcousticFeatureProcess
from become_yukarin.dataset.dataset import AcousticFeatureSaveProcess
from become_yukarin.dataset.dataset import WaveFileLoadProcess
from become_yukarin.dataset.utility import MelCepstrumAligner
from become_yukarin.param import AcousticFeatureParam
from become_yukarin.param import VoiceParam
# Default parameter bundles; their fields supply the CLI defaults below.
base_voice_param = VoiceParam()
base_acoustic_feature_param = AcousticFeatureParam()
# CLI: two input wave directories (one per speaker), two output feature
# directories, an optional pre-converter model for speaker 1, and the
# acoustic-feature extraction parameters.
parser = argparse.ArgumentParser()
parser.add_argument('--input1_directory', '-i1', type=Path)  # speaker 1 wave files
parser.add_argument('--input2_directory', '-i2', type=Path)  # speaker 2 wave files
parser.add_argument('--output1_directory', '-o1', type=Path)  # speaker 1 feature output
parser.add_argument('--output2_directory', '-o2', type=Path)  # speaker 2 feature output
parser.add_argument('--pre_converter1_config', type=Path)  # optional: config for pre-converting speaker 1
parser.add_argument('--pre_converter1_model', type=Path)  # optional: model for pre-converting speaker 1
parser.add_argument('--sample_rate', type=int, default=base_voice_param.sample_rate)
parser.add_argument('--top_db', type=float, default=base_voice_param.top_db)
parser.add_argument('--pad_second', type=float, default=base_voice_param.pad_second)
parser.add_argument('--frame_period', type=int, default=base_acoustic_feature_param.frame_period)
parser.add_argument('--order', type=int, default=base_acoustic_feature_param.order)
parser.add_argument('--alpha', type=float, default=base_acoustic_feature_param.alpha)
parser.add_argument('--f0_estimating_method', type=str, default=base_acoustic_feature_param.f0_estimating_method)
# Per-speaker f0 search ranges (Hz); defaults cover a wide vocal range.
parser.add_argument('--f0_floor1', type=float, default=71)
parser.add_argument('--f0_ceil1', type=float, default=800)
parser.add_argument('--f0_floor2', type=float, default=71)
parser.add_argument('--f0_ceil2', type=float, default=800)
# Feature components excluded when saving (they are large; mfcc is kept).
parser.add_argument('--ignore_feature', nargs='+', default=['spectrogram', 'aperiodicity'])
parser.add_argument('--disable_alignment', action='store_true')
parser.add_argument('--enable_overwrite', action='store_true')
arguments = parser.parse_args()
# NOTE(review): dir() prints attribute *names* only; vars(arguments) would
# show the parsed values — confirm whether that was the intent.
pprint(dir(arguments))
# Build the optional pre-converter for speaker 1 once, at module scope, so
# multiprocessing workers inherit it.
pre_convert = arguments.pre_converter1_config is not None
if pre_convert:
    config = create_config(arguments.pre_converter1_config)
    pre_converter1 = AcousticConverter(config, arguments.pre_converter1_model)
else:
    pre_converter1 = None
def generate_feature(path1, path2):
    """Extract acoustic features for one paired wave file, optionally
    pre-convert speaker 1's features, time-align the pair, and save both
    results as .npy files in the configured output directories.
    """
    target1 = Path(arguments.output1_directory, path1.stem + '.npy')
    target2 = Path(arguments.output2_directory, path2.stem + '.npy')
    # Skip pairs that are already done unless overwriting is requested.
    if not arguments.enable_overwrite and target1.exists() and target2.exists():
        return

    # Load both waves with identical trimming/padding settings.
    loader = WaveFileLoadProcess(
        sample_rate=arguments.sample_rate,
        top_db=arguments.top_db,
        pad_second=arguments.pad_second,
    )
    wave_a = loader(path1, test=True)
    wave_b = loader(path2, test=True)

    # Each speaker gets its own f0 search range; everything else is shared.
    def make_extractor(f0_floor, f0_ceil):
        return AcousticFeatureProcess(
            frame_period=arguments.frame_period,
            order=arguments.order,
            alpha=arguments.alpha,
            f0_estimating_method=arguments.f0_estimating_method,
            f0_floor=f0_floor,
            f0_ceil=f0_ceil,
        )

    extractor_a = make_extractor(arguments.f0_floor1, arguments.f0_ceil1)
    extractor_b = make_extractor(arguments.f0_floor2, arguments.f0_ceil2)
    feature_a = extractor_a(wave_a, test=True).astype_only_float(numpy.float32)
    feature_b = extractor_b(wave_b, test=True).astype_only_float(numpy.float32)

    # The pre-converted feature is used only to drive the aligner; the raw
    # (unconverted) feature is what gets aligned and saved.
    reference_a = pre_converter1.convert_to_feature(feature_a) if pre_convert else feature_a

    if not arguments.disable_alignment:
        # Time-align the two sequences on mel-cepstrum distance, then apply
        # the same warping to every feature component.
        aligner = MelCepstrumAligner(reference_a.mfcc, feature_b.mfcc)
        aligned = {
            name: aligner.align(getattr(feature_a, name), getattr(feature_b, name))
            for name in ('f0', 'spectrogram', 'aperiodicity', 'mfcc', 'voiced')
        }
        feature_a = AcousticFeature(**{name: pair[0] for name, pair in aligned.items()})
        feature_b = AcousticFeature(**{name: pair[1] for name, pair in aligned.items()})

    feature_a.validate()
    feature_b.validate()

    # Persist both features, dropping the ignored (large) components.
    saver = AcousticFeatureSaveProcess(validate=True, ignore=arguments.ignore_feature)
    saver({'path': target1, 'feature': feature_a})
    print('saved!', target1)
    saver({'path': target2, 'feature': feature_b})
    print('saved!', target2)
def generate_mean_var(path_directory: Path):
    """Compute per-component mean/variance over every saved feature in a
    directory and write them to ``mean.npy`` / ``var.npy``.

    Stale ``mean.npy``/``var.npy`` are deleted first so they are not picked
    up by the glob and folded into the statistics themselves.
    """
    path_mean = Path(path_directory, 'mean.npy')
    path_var = Path(path_directory, 'var.npy')
    # Remove previous outputs so the glob below treats only data files.
    if path_mean.exists():
        path_mean.unlink()
    if path_var.exists():
        path_var.unlink()

    acoustic_feature_load_process = AcousticFeatureLoadProcess(validate=False)
    acoustic_feature_save_process = AcousticFeatureSaveProcess(validate=False)

    f0_list = []
    spectrogram_list = []
    aperiodicity_list = []
    mfcc_list = []
    for path in path_directory.glob('*.npy'):
        feature = acoustic_feature_load_process(path)
        f0_list.append(feature.f0[feature.voiced])  # keep voiced frames only
        spectrogram_list.append(feature.spectrogram)
        aperiodicity_list.append(feature.aperiodicity)
        mfcc_list.append(feature.mfcc)

    def concatenate(arr_list):
        # Components skipped at save time (see --ignore_feature) are not real
        # arrays, so numpy.concatenate raises ValueError; in that case the
        # list is returned unchanged and the mean/var below degrade gracefully.
        try:
            arr_list = numpy.concatenate(arr_list)
        except ValueError:
            # Was a bare `except:`; narrowed so real errors (and
            # KeyboardInterrupt/SystemExit) are no longer swallowed.
            pass
        return arr_list

    f0_list = concatenate(f0_list)
    spectrogram_list = concatenate(spectrogram_list)
    aperiodicity_list = concatenate(aperiodicity_list)
    mfcc_list = concatenate(mfcc_list)

    # voiced has no meaningful mean/var; store NaN as a placeholder.
    mean = AcousticFeature(
        f0=numpy.mean(f0_list, axis=0, keepdims=True),
        spectrogram=numpy.mean(spectrogram_list, axis=0, keepdims=True),
        aperiodicity=numpy.mean(aperiodicity_list, axis=0, keepdims=True),
        mfcc=numpy.mean(mfcc_list, axis=0, keepdims=True),
        voiced=numpy.nan,
    )
    var = AcousticFeature(
        f0=numpy.var(f0_list, axis=0, keepdims=True),
        spectrogram=numpy.var(spectrogram_list, axis=0, keepdims=True),
        aperiodicity=numpy.var(aperiodicity_list, axis=0, keepdims=True),
        mfcc=numpy.var(mfcc_list, axis=0, keepdims=True),
        voiced=numpy.nan,
    )
    acoustic_feature_save_process({'path': path_mean, 'feature': mean})
    acoustic_feature_save_process({'path': path_var, 'feature': var})
def main():
    """Extract features for every positionally-paired wave file in parallel,
    then write per-directory mean/variance statistics."""
    paths1 = list(sorted(arguments.input1_directory.glob('*.wav')))
    paths2 = list(sorted(arguments.input2_directory.glob('*.wav')))
    # Pairing is positional after sorting, so counts must match.
    assert len(paths1) == len(paths2)

    arguments.output1_directory.mkdir(exist_ok=True)
    arguments.output2_directory.mkdir(exist_ok=True)

    # Context manager guarantees worker processes are terminated even on
    # error — the original never closed/joined the pool.
    with multiprocessing.Pool() as pool:
        pool.starmap(generate_feature, zip(paths1, paths2), chunksize=16)

    generate_mean_var(arguments.output1_directory)
    generate_mean_var(arguments.output2_directory)
# Run only when executed as a script (also required so multiprocessing
# workers can re-import this module without re-running the pipeline).
if __name__ == '__main__':
    main()
| {"/become_yukarin/dataset/dataset.py": ["/become_yukarin/config/config.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py"], "/become_yukarin/voice_changer.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/data_struct.py", "/become_yukarin/super_resolution.py"], "/train.py": ["/become_yukarin/config/config.py", "/become_yukarin/dataset/__init__.py", "/become_yukarin/model/model.py", "/become_yukarin/updater/updater.py"], "/become_yukarin/__init__.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/super_resolution.py", "/become_yukarin/vocoder.py", "/become_yukarin/voice_changer.py"], "/become_yukarin/model/model.py": ["/become_yukarin/config/config.py"], "/become_yukarin/model/cbhg_model.py": ["/become_yukarin/config/old_config.py"], "/become_yukarin/updater/updater.py": ["/become_yukarin/config/config.py", "/become_yukarin/model/model.py"], "/tests/test_dataset.py": ["/become_yukarin/dataset/__init__.py"], "/become_yukarin/acoustic_converter.py": ["/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/model/model.py"], "/scripts/extract_spectrogram_pair.py": ["/become_yukarin/dataset/dataset.py", "/become_yukarin/param.py"], "/scripts/extract_acoustic_feature.py": ["/become_yukarin/acoustic_converter.py", "/become_yukarin/config/config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", "/become_yukarin/dataset/utility.py", "/become_yukarin/param.py"], "/scripts/super_resolution_test.py": ["/become_yukarin/__init__.py", "/become_yukarin/config/sr_config.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/config/config.py": ["/become_yukarin/param.py"], "/become_yukarin/super_resolution.py": ["/become_yukarin/config/sr_config.py", "/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py"], "/become_yukarin/vocoder.py": ["/become_yukarin/data_struct.py", "/become_yukarin/dataset/dataset.py", 
"/become_yukarin/param.py"], "/become_yukarin/config/sr_config.py": ["/become_yukarin/param.py"], "/become_yukarin/dataset/__init__.py": ["/become_yukarin/dataset/dataset.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.