index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
75,505 | ms394/django-blog | refs/heads/master | /blog/views.py | from django.shortcuts import render
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import Post
from .forms import PostForm
from django.urls import reverse_lazy, reverse
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
class HomePageView(ListView):
    """Public landing page: all posts, newest first, exposed as 'posts'."""
    model = Post
    context_object_name = 'posts'
    template_name = 'home.html'
    # Newest posts first (created_date descending).
    ordering = ['-created_date']
class BlogListView(LoginRequiredMixin, ListView):
    """Authenticated list of all posts, newest first.

    The class previously declared both ``ordering`` and a ``get_queryset``
    override; ListView ignores ``ordering`` once ``get_queryset`` is
    overridden, so the redundant attribute was removed. Behavior is
    unchanged: same template, same ordering, same login redirect.
    """
    model = Post
    context_object_name = 'posts'
    template_name = 'blog/bloglist.html'
    login_url = 'login'

    def get_queryset(self):
        # Newest posts first.
        return Post.objects.order_by('-created_date')
class BlogDetailView(LoginRequiredMixin,DetailView):
    """Authenticated detail page for a single post (looked up by pk)."""
    model = Post
    context_object_name = 'post'
    template_name = 'blog/blogdetail.html'
    # Anonymous users are redirected to the 'login' URL name.
    login_url = 'login'
class BlogCreateView(LoginRequiredMixin, CreateView):
    """Create a new Post; the logged-in user becomes its author.

    form_valid previously saved the form twice (an explicit form.save()
    followed by super().form_valid(), which saves again) and printed
    request.FILES for debugging. Both were removed; setting
    form.instance.author before delegating is the idiomatic single-save
    pattern and yields the same stored row and redirect.
    """
    model = Post
    fields = ['title', 'body', 'image']
    template_name = 'blog/createblog.html'
    login_url = 'login'

    def form_valid(self, form):
        """Attach the requesting user as author, then let CreateView save."""
        form.instance.author = self.request.user
        return super(BlogCreateView, self).form_valid(form)
class BlogUpdateView(LoginRequiredMixin,UserPassesTestMixin, UpdateView):
    """Edit an existing Post; only its author passes the access test."""
    model = Post
    fields = ['title','body', 'image']
    template_name = 'blog/blogupdate.html'
    login_url = 'login'

    def test_func(self):
        # UserPassesTestMixin hook: allow only the post's author.
        obj = self.get_object()
        return obj.author == self.request.user
class BlogDeleteView(LoginRequiredMixin,UserPassesTestMixin, DeleteView):
    """Delete a Post; restricted to its author via UserPassesTestMixin."""
    model = Post
    template_name = 'blog/blogdelete.html'
    # After deletion, go back to the authenticated list view.
    success_url = reverse_lazy('blog:blog-list')
    login_url = 'login'

    def test_func(self):
        # Only the post's author may delete it.
        obj = self.get_object()
return obj.author == self.request.user | {"/blog/views.py": ["/blog/models.py"], "/blog/urls.py": ["/blog/views.py"]} |
75,506 | ms394/django-blog | refs/heads/master | /blog/models.py | from django.db import models
from django.urls import reverse
from django.contrib.auth import get_user_model
class Comment(models.Model):
    """A short comment attached to a Post."""
    # Deleting the Post cascades and removes its comments.
    article = models.ForeignKey('Post', on_delete=models.CASCADE, related_name='comments')
    comment = models.CharField(max_length=150)
    author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)

    def __str__(self):
        return self.comment

    def get_absolute_url(self):
        # NOTE(review): args uses the *comment's* own id, but 'blog:blog-detail'
        # takes a Post pk (see urls) — looks like it should be self.article.id;
        # confirm intent before changing.
        return reverse('blog:blog-detail', args=[str(self.id)])
class Post(models.Model):
    """A blog post with optional image, authored by a Django auth user."""
    title = models.CharField(max_length=200)
    # NOTE(review): 'auth.user' (lowercase) resolves through the app registry,
    # but settings.AUTH_USER_MODEL / get_user_model() is the conventional form.
    author = models.ForeignKey('auth.user', on_delete=models.CASCADE)
    body = models.TextField()
    # Stamped once at insert time (auto_now_add).
    created_date = models.DateTimeField(auto_now_add=True)
    image = models.ImageField(upload_to='postImages', null=True, blank=True)

    def get_absolute_url(self):
        # Canonical URL: this post's detail page.
        return reverse('blog:blog-detail', args=[str(self.id)])

    def __str__(self):
        return self.title
| {"/blog/views.py": ["/blog/models.py"], "/blog/urls.py": ["/blog/views.py"]} |
75,507 | ms394/django-blog | refs/heads/master | /blog/urls.py | from django.urls import path
from .views import BlogListView, BlogDetailView, BlogCreateView, BlogUpdateView, BlogDeleteView, HomePageView
# Namespaced under 'blog' so reverses look like 'blog:blog-detail'.
app_name = 'blog'
urlpatterns = [
    # Public home page; everything below requires login (see views).
    path('', HomePageView.as_view(), name='home'),
    path('blogs', BlogListView.as_view(), name='blog-list'),
    path('post/<int:pk>', BlogDetailView.as_view(), name='blog-detail'),
    path('createblog',BlogCreateView.as_view(),name='create-blog' ),
    path('updateblog/<int:pk>', BlogUpdateView.as_view(), name='blog-update'),
    path('deleteblog/<int:pk>', BlogDeleteView.as_view(), name='blog-delete')
] | {"/blog/views.py": ["/blog/models.py"], "/blog/urls.py": ["/blog/views.py"]} |
75,509 | tbjorch/ABScraper | refs/heads/master | /repository/article_repo.py | # Standard library
import logging
# internal modules
from dto import ArticleDTO
import models
from models import Session, Article
def insert_article(article_dto: ArticleDTO) -> None:
    """Persist one ArticleDTO as an Article row (best effort).

    Errors are logged, never raised, preserving the original fire-and-forget
    contract. The session is now closed in a finally block — previously a
    failed commit leaked the session because close() sat inside the try body.
    """
    session = None
    try:
        session = Session()
        article = Article(
            id=article_dto.id, headline=article_dto.headline, body=article_dto.body
        )
        session.add(article)
        session.commit()
    except Exception as e:
        logging.error("when inserting row in article table: %s", e)
    finally:
        if session is not None:
            session.close()
| {"/repository/article_repo.py": ["/dto/__init__.py", "/models/__init__.py"], "/repository/url_repo.py": ["/models/__init__.py", "/dto/__init__.py"], "/service/content_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/service/sitemap_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/run.py": ["/service/manager.py"]} |
75,510 | tbjorch/ABScraper | refs/heads/master | /repository/url_repo.py | # Standard library
import logging
# 3rd party modules
from sqlalchemy import update
# Internal modules
import models
from models import Session, Url, Article
from dto import AddUrlDTO, UrlDTO
def insert_url(add_url_dto: AddUrlDTO) -> None:
    """Insert one sitemap URL row (best effort; errors are logged only).

    The session is now always closed in a finally block; the original leaked
    it whenever add/commit raised.
    """
    session = None
    try:
        url = Url(
            id=add_url_dto.id,
            url=add_url_dto.url,
            yearmonth=add_url_dto.yearmonth,
            undesired_url=add_url_dto.undesired_url,
        )
        session = Session()
        session.add(url)
        session.commit()
    except Exception as e:
        logging.error("During inserting url to db: %s", e)
    finally:
        if session is not None:
            session.close()
def get_all_url_ids() -> list:
    """Return every Url primary key as a list of one-element row tuples.

    The return annotation was ``[]`` (an empty-list expression); replaced with
    ``list``. The session is now closed — the original never released it.
    """
    session = Session()
    try:
        return session.query(Url.id).all()
    finally:
        session.close()
def get_unscraped_urls() -> list:
    """Return UrlDTOs for rows never scraped, skipping undesired/paywalled URLs.

    Uses .is_() instead of ``== None`` / ``== False`` comparisons (same SQL,
    lint-clean), converts with a comprehension, and — unlike the original —
    closes the session before returning.
    """
    session = Session()
    try:
        unscraped_urls = (
            session.query(Url)
            .filter(
                Url.scraped_at.is_(None),
                Url.undesired_url.is_(False),
                Url.payed_content.is_(False),
            )
            .all()
        )
        return [_convert_to_url_dto(url) for url in unscraped_urls]
    finally:
        session.close()
def update_url(url_dto: UrlDTO) -> None:
    """Copy scrape-status fields from the DTO onto the matching Url row.

    Best effort: errors are logged, never raised. Added an explicit guard for
    a missing row (previously an AttributeError on None was swallowed by the
    broad except with a confusing message) and a finally block so the session
    is always closed.
    """
    session = None
    try:
        session = Session()
        url: Url = session.query(Url).filter(Url.id == url_dto.id).first()
        if url is None:
            logging.error("No Url row with id %s to update", url_dto.id)
            return
        url.payed_content = url_dto.payed_content
        url.scraped_at = url_dto.scraped_at
        session.commit()
    except Exception as e:
        logging.error("When updating row in table Urls: %s", e)
    finally:
        if session is not None:
            session.close()
def _convert_to_url_dto(url: Url) -> UrlDTO:
    """Map a Url ORM row onto a detached UrlDTO, field for field."""
    field_names = (
        "id",
        "url",
        "yearmonth",
        "payed_content",
        "undesired_url",
        "scraped_at",
        "created_at",
    )
    return UrlDTO(**{name: getattr(url, name) for name in field_names})
| {"/repository/article_repo.py": ["/dto/__init__.py", "/models/__init__.py"], "/repository/url_repo.py": ["/models/__init__.py", "/dto/__init__.py"], "/service/content_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/service/sitemap_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/run.py": ["/service/manager.py"]} |
75,511 | tbjorch/ABScraper | refs/heads/master | /dto/__init__.py | # Data Transfer Objects and support functions
# used to decouple the service modules from SQLAlchemy.
# The service doesn't know what interface is used with the database.
from datetime import datetime
class AddUrlDTO:
    """Input DTO carrying the fields needed to insert a new Url row."""

    def __init__(self, id: str, url: str, yearmonth: str, undesired_url: bool):
        # Plain value object: one attribute per constructor argument.
        self.id = id
        self.url = url
        self.yearmonth = yearmonth
        self.undesired_url = undesired_url
class ArticleDTO:
    """Value object for a scraped article: id, headline and body text."""

    def __init__(self, id: str, headline: str, body: str):
        self.id = id
        self.headline = headline
        self.body = body
class UrlDTO:
    """Read DTO mirroring one Url row, detached from SQLAlchemy.

    ``__repr__`` previously printed ``News(...)`` — a copy-paste leftover —
    which misidentified the object in logs; it now prints ``UrlDTO(...)``.
    """

    def __init__(
        self,
        id: str,
        url: str,
        yearmonth: str,
        payed_content: bool,
        undesired_url: bool,
        scraped_at: datetime,
        created_at: datetime,
    ):
        self.id: str = id
        self.url: str = url
        self.yearmonth: str = yearmonth
        self.payed_content: bool = payed_content
        self.undesired_url: bool = undesired_url
        self.scraped_at: datetime = scraped_at
        self.created_at: datetime = created_at

    def __repr__(self) -> str:
        return (
            f"UrlDTO(id={self.id} "
            f"url={self.url} "
            f"yearmonth={self.yearmonth} "
            f"payed_content={self.payed_content} "
            f"undesired_url={self.undesired_url} "
            f"scraped_at={self.scraped_at} "
            f"created_at={self.created_at})"
        )
| {"/repository/article_repo.py": ["/dto/__init__.py", "/models/__init__.py"], "/repository/url_repo.py": ["/models/__init__.py", "/dto/__init__.py"], "/service/content_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/service/sitemap_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/run.py": ["/service/manager.py"]} |
75,512 | tbjorch/ABScraper | refs/heads/master | /service/error.py | class UnwantedArticleException(Exception):
pass
| {"/repository/article_repo.py": ["/dto/__init__.py", "/models/__init__.py"], "/repository/url_repo.py": ["/models/__init__.py", "/dto/__init__.py"], "/service/content_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/service/sitemap_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/run.py": ["/service/manager.py"]} |
75,513 | tbjorch/ABScraper | refs/heads/master | /service/threader.py | # builtin packages
import threading
from queue import Queue
from datetime import datetime
from time import sleep
import logging
# 3P packages
import requests
# local modules
from service import content_scraper
from repository import url_repo
db_lock = threading.Lock()
def scraper_thread():
    """Worker loop: pull UrlDTOs off the global scrape_q and scrape each one.

    task_done() is now issued in a finally block — previously an unexpected
    exception from the scraper would skip it, leaving scrape_q.join() in
    start_scraper() blocked forever.
    """
    while True:
        url = scrape_q.get()
        try:
            logging.debug("scraping %s", url.url)
            content_scraper.get_news_content(url)
        finally:
            scrape_q.task_done()
def set_scrape_queue():
    """(Re)build the module-global scrape queue from all unscraped URLs."""
    global scrape_q
    scrape_q = Queue()
    for pending in url_repo.get_unscraped_urls():
        scrape_q.put(pending)
def start_scraper(num_of_threads):
    """Fill the queue, spawn daemon workers, and block until it drains."""
    set_scrape_queue()
    workers = [
        threading.Thread(target=scraper_thread, daemon=True)
        for _ in range(num_of_threads)
    ]
    for worker in workers:
        worker.start()
    scrape_q.join()
| {"/repository/article_repo.py": ["/dto/__init__.py", "/models/__init__.py"], "/repository/url_repo.py": ["/models/__init__.py", "/dto/__init__.py"], "/service/content_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/service/sitemap_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/run.py": ["/service/manager.py"]} |
75,514 | tbjorch/ABScraper | refs/heads/master | /service/content_scraper.py | # standard library
from datetime import datetime
from threading import Lock
import logging
# 3p
import requests
from bs4 import BeautifulSoup
# Internal modules
from dto import ArticleDTO, UrlDTO
from repository import article_repo, url_repo
from service.error import UnwantedArticleException
db_lock = Lock()
def _create_html_soup_object(url: str) -> BeautifulSoup:
    """GET *url* (3 s timeout) and parse the body as HTML.

    Returns None on any network/parse error — callers must handle that.
    The original fell off the end implicitly; the None return is now
    explicit, and the two separate log calls were merged so the URL and the
    exception land in one record.
    """
    try:
        page = requests.get(url, timeout=3)
        return BeautifulSoup(page.content, "html.parser")
    except Exception as e:
        logging.error("during get call to url %s: %s", url, e)
        return None
def _get_news_headline(soup: BeautifulSoup) -> str:
    """Extract the headline text from the article page.

    Raises AttributeError when the headline tag is absent, exactly as before:
    the original wrapped the lookup in a try/except that only re-raised, so
    the dead wrapper was removed.
    """
    return soup.find("h1", {"data-test-id": "headline"}).get_text()
def _get_news_body_text(soup: BeautifulSoup) -> str:
    """Concatenate the article's paragraph texts, skipping boilerplate.

    Paragraphs containing link/symbol widgets and the text-robot notice are
    excluded. Changes vs. the original: the try/except that only re-raised
    was removed, and the quadratic ``+=`` string build was replaced with a
    join that produces the identical result (each kept paragraph followed by
    a single space).
    """
    # Body text lives in <p> tags with class c-Cz1 or _2ZkCB.
    paragraph_list = soup.find_all("p", class_="c-Cz1") + soup.find_all(
        "p", class_="_2ZkCB"
    )
    kept = []
    for paragraph in paragraph_list:
        if (
            paragraph.find(class_="abBlueLink")
            or paragraph.find(class_="abSymbPi")
            or paragraph.get_text()
            == "Denna text är skapad av Aftonbladets textrobot"
        ):
            continue
        kept.append(paragraph.get_text())
    return "".join(text + " " for text in kept)
def _assert_not_payed_content(soup: BeautifulSoup) -> None:
    """Raise UnwantedArticleException when a plus/paywall logo is present."""
    logo_style = (
        "background-image:url(//wwwe.aftonbladet.se/ab-se/hyperion/gfx/"
        "logo-%s-plus-2019.svg)"
    )
    # Either brand's paywall badge disqualifies the article.
    for brand in ("sportbladet", "aftonbladet"):
        if soup.find(style=logo_style % brand):
            raise UnwantedArticleException()
def get_news_content(url_dto: UrlDTO) -> None:
    """Scrape one article and persist it; mark paywalled URLs instead.

    Runs on many scraper threads, so DB writes are serialized through
    db_lock. Failures are logged and swallowed so one bad article never
    kills a worker thread.
    """
    soup: BeautifulSoup = _create_html_soup_object(url_dto.url)
    try:
        # Raises UnwantedArticleException for paywalled pages.
        _assert_not_payed_content(soup)
        headline = _get_news_headline(soup)
        body = _get_news_body_text(soup)
        article = ArticleDTO(id=url_dto.id, headline=headline, body=body)
        with db_lock:
            article_repo.insert_article(article)
            url_dto.scraped_at = datetime.now()
            url_repo.update_url(url_dto)
    except UnwantedArticleException:
        # Record the paywall so the URL is not retried.
        url_dto.payed_content = True
        with db_lock:
            url_repo.update_url(url_dto)
        logging.warning(
            "Payed content article does not allow complete scraping of %s" % url_dto.url
        )
    except Exception as e:
        # Includes the AttributeError path when soup is None (fetch failed).
        logging.error("when scraping article %s: %s" % (url_dto.url, e))
| {"/repository/article_repo.py": ["/dto/__init__.py", "/models/__init__.py"], "/repository/url_repo.py": ["/models/__init__.py", "/dto/__init__.py"], "/service/content_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/service/sitemap_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/run.py": ["/service/manager.py"]} |
75,515 | tbjorch/ABScraper | refs/heads/master | /service/sitemap_scraper.py | # standard library
from datetime import datetime
import logging
# 3rd party modules
import requests
from bs4 import BeautifulSoup
# internal modules
from dto import AddUrlDTO
from repository import url_repo
from service.error import UnwantedArticleException
def get_news_urls_from_sitemap(date: str):
    """Scrape the monthly sitemap and return a list of AddUrlDTOs.

    *date* is a YYYYMM string. Items whose article id cannot be parsed are
    skipped; the error log now includes the exception itself, which the
    original silently dropped.
    """
    sitemap_url: str = "https://www.aftonbladet.se/sitemaps/files/" + date + "-articles.xml"
    soup = _create_xml_soup_object(sitemap_url)
    value_list = []
    # Each <loc> tag holds one article URL.
    for item in soup.find_all("loc"):
        loc = item.get_text()
        try:
            segments = loc.split("/")
            # The article id is the path segment right after the "a" segment.
            article_id = segments[segments.index("a") + 1]
            add_url_dto = AddUrlDTO(
                id=article_id,
                url=loc,
                yearmonth=date,
                undesired_url=False,
            )
            add_url_dto = _check_if_undesired_url(add_url_dto)
            value_list.append(add_url_dto)
        except UnwantedArticleException as e:
            logging.warning(e)
        except Exception as e:
            logging.error("when scraping sitemap for url %s: %s", loc, e)
    return value_list
def start(date):
    """Insert all not-yet-known sitemap URLs for the given YYYYMM month.

    get_all_url_ids() returns one-element row tuples; they are flattened
    into a set so each membership check is O(1) — the original scanned a
    list of tuples per URL, which is O(n*m).
    """
    url_list = get_news_urls_from_sitemap(date)
    counter = 0
    existing_ids = {row[0] for row in url_repo.get_all_url_ids()}
    for url in url_list:
        if url.id not in existing_ids:
            url_repo.insert_url(url)
            counter += 1
    logging.info("Inserted %s URLs to database", counter)
def _check_if_undesired_url(add_url_dto: AddUrlDTO):
undesired_urls = [
"www.aftonbladet.se/autotest",
"special.aftonbladet.se",
"www.aftonbladet.se/nyheter/trafik",
"www.aftonbladet.se/sportbladet"
]
for string in undesired_urls:
if string in add_url_dto.url:
add_url_dto.undesired_url = True
return add_url_dto
def _create_xml_soup_object(url: str) -> BeautifulSoup:
    """Fetch *url* (3 s timeout) and parse the response body with lxml."""
    response = requests.get(url, timeout=3)
    return BeautifulSoup(response.content, "lxml")
#if __name__ == "__main__":
# date = input("Input date in YYYYMM format: ")
# start(date) | {"/repository/article_repo.py": ["/dto/__init__.py", "/models/__init__.py"], "/repository/url_repo.py": ["/models/__init__.py", "/dto/__init__.py"], "/service/content_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/service/sitemap_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/run.py": ["/service/manager.py"]} |
75,516 | tbjorch/ABScraper | refs/heads/master | /models/__init__.py | # standard library
from datetime import datetime
# 3rd party modules
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (
Column,
String,
DateTime,
Text,
Boolean,
create_engine,
ForeignKey,
)
from sqlalchemy.orm import relationship, sessionmaker
Base = declarative_base()
class Url(Base): # type: ignore
    """One article URL discovered in the sitemap, plus its scrape status."""
    __tablename__ = "Urls"
    # Article id extracted from the URL path (segment after "/a/").
    id: str = Column(String(6), primary_key=True)
    url: str = Column(String(200), nullable=False, unique=True)
    # YYYYMM of the sitemap file this URL came from.
    yearmonth: str = Column(String(6), nullable=False)
    # Set when a paywall badge blocked scraping.
    payed_content: bool = Column(Boolean, nullable=False, default=False)
    # Set when the URL matches a blocked section (autotest, traffic, ...).
    undesired_url: bool = Column(Boolean, nullable=False, default=False)
    # NULL until the article body has been scraped successfully.
    scraped_at: datetime = Column(DateTime, nullable=True)
    created_at: datetime = Column(DateTime, nullable=False, default=datetime.utcnow)
    # At most one Article per Url (shared primary key on Article.id).
    article = relationship("Article", uselist=False)

    def __repr__(self) -> str:
        return (
            f"Url(id={self.id} "
            f"url={self.url} "
            f"yearmonth={self.yearmonth} "
            f"payed_content={self.payed_content} "
            f"undesired_url={self.undesired_url} "
            f"scraped_at={self.scraped_at} "
            f"created_at={self.created_at})"
        )
class Article(Base): # type: ignore
    """Scraped article content, keyed by the same id as its Url row."""
    __tablename__ = "Articles"
    id: str = Column(String(6), ForeignKey("Urls.id"), primary_key=True)
    headline: str = Column(String(300), nullable=False)
    body: str = Column(Text, nullable=False)
    created_at: datetime = Column(DateTime, nullable=False, default=datetime.utcnow)
import os

# engine = create_engine("sqlite:///:memory:", echo=True)
# Allow the connection string to be overridden via the environment instead of
# only hard-coding credentials in source; the previous literal stays the
# default so existing deployments behave identically.
engine = create_engine(
    os.environ.get(
        "DATABASE_URL", "postgresql://root:asd123@localhost:5432/newsdata"
    )
)
Base.metadata.create_all(bind=engine)
Session = sessionmaker(bind=engine)
| {"/repository/article_repo.py": ["/dto/__init__.py", "/models/__init__.py"], "/repository/url_repo.py": ["/models/__init__.py", "/dto/__init__.py"], "/service/content_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/service/sitemap_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/run.py": ["/service/manager.py"]} |
75,517 | tbjorch/ABScraper | refs/heads/master | /service/manager.py | # standard library
from datetime import datetime
from time import sleep
import logging
# internal modules
from service import threader, sitemap_scraper
from repository import url_repo
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s", level="INFO")
def start_service():
    """Run forever: sitemap pass, then a content pass if work exists, every 60 s.

    The original guarded the loop body with ``now.minute % 1 == 0``, which is
    always true for any integer minute; the dead condition was removed with
    no behavior change.
    """
    logging.info("Starting scraper service")
    while True:
        now = datetime.now()
        logging.info("Starting sitemap scraper")
        # Current month's sitemap, formatted YYYYMM.
        sitemap_scraper.start(f"{now:%Y%m}")
        unscraped_urls = url_repo.get_unscraped_urls()
        if len(unscraped_urls) > 0:
            logging.info(
                "Starting content scraper to scrape %s articles",
                len(unscraped_urls),
            )
            threader.start_scraper(20)
        sleep(60)
| {"/repository/article_repo.py": ["/dto/__init__.py", "/models/__init__.py"], "/repository/url_repo.py": ["/models/__init__.py", "/dto/__init__.py"], "/service/content_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/service/sitemap_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/run.py": ["/service/manager.py"]} |
75,518 | tbjorch/ABScraper | refs/heads/master | /run.py | from service.manager import start_service
start_service()
| {"/repository/article_repo.py": ["/dto/__init__.py", "/models/__init__.py"], "/repository/url_repo.py": ["/models/__init__.py", "/dto/__init__.py"], "/service/content_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/service/sitemap_scraper.py": ["/dto/__init__.py", "/service/error.py"], "/run.py": ["/service/manager.py"]} |
75,536 | liyong1028826685/django-patchy | refs/heads/master | /tests/test_middleware.py | from django.test import TestCase
from django.test import Client
from django.test import override_settings
from django.views.generic import View
from django.conf.urls import url
from django.http import HttpResponse
from patchy.utils import this_thread_is_sql_monitoring
class MockShortView(View):
    """Fixture view that responds immediately (fast-request case)."""
    def get(self, request):
        return HttpResponse('hello world')
class MockLongView(View):
    """Fixture view that sleeps 1 s to exceed the long-request threshold."""
    def get(self, request):
        import time
        time.sleep(1)
        return HttpResponse('hello')
class MockIgnoreView(View):
    """Fixture view for a URL that the middleware is configured to ignore."""
    def get(self, request):
        import time
        time.sleep(2)
        return HttpResponse('hello')
# This module doubles as ROOT_URLCONF for the tests below.
urlpatterns = [
    url(r'^short/$', MockShortView.as_view()),
    url(r'^long/$', MockLongView.as_view()),
    url(r'^ignore/$', MockIgnoreView.as_view()),
]
@override_settings(ROOT_URLCONF='tests.test_middleware')
class TestRequestMiddleware(TestCase):
    """Exercises LongRequestMiddleware's elapsed-time stamping and ignores."""

    def setUp(self):
        self.client = Client()

    def test_short_request(self):
        # A fast view must still get the X-ELAPSED header, under 1 s.
        with self.settings(MIDDLEWARE_CLASSES=('patchy.middleware.LongRequestMiddleware',)):
            response = self.client.get('/short/')
            self.assertEqual(response.status_code, 200)
            elapsed = response.get('X-ELAPSED')
            self.assertTrue(float(elapsed) < 1)

    def test_long_request(self):
        # MockLongView sleeps 1 s, so elapsed must exceed the 1 s default.
        with self.settings(MIDDLEWARE_CLASSES=('patchy.middleware.LongRequestMiddleware',)):
            response = self.client.get('/long/')
            self.assertEqual(response.status_code, 200)
            elapsed = response.get('X-ELAPSED')
            self.assertTrue(float(elapsed) > 1)

    def test_ignore_request(self):
        # Ignored URLs must also disable SQL monitoring for the thread.
        with self.settings(MIDDLEWARE_CLASSES=('patchy.middleware.LongRequestMiddleware',), PATCHY_LONG_REQUEST_IGNORE_URLS=[r'^/ignore']):
            self.client.get('/ignore/')
            self.assertFalse(this_thread_is_sql_monitoring())
| {"/tests/test_middleware.py": ["/patchy/utils.py"], "/patchy/middleware.py": ["/patchy/utils.py"], "/tests/test_utils.py": ["/tests/models.py", "/patchy/utils.py"]} |
75,537 | liyong1028826685/django-patchy | refs/heads/master | /patchy/utils.py | import time
import logging
import threading
from functools import wraps
from django.conf import settings
from django.db.backends.utils import CursorWrapper
logger = logging.getLogger(__name__)
_locals = threading.local()
def this_thread_is_sql_monitoring():
    """Return whether long-SQL monitoring is on for the current thread.

    Threads that never toggled the flag default to monitoring enabled.
    """
    try:
        return _locals.sql_monitoring
    except AttributeError:
        return True
def sql_monitoring_this_thread():
    """Enable long-SQL monitoring for the current thread."""
    _locals.sql_monitoring = True
def sql_unmonitoring_this_thread():
    """Disable long-SQL monitoring for the current thread."""
    _locals.sql_monitoring = False
class NoSQLMonitoring(object):
    """Context manager / decorator that suspends SQL monitoring for a scope.

    The original remembered the outer state in a single thread-local
    attribute, so *nested* use clobbered it: the inner __enter__ overwrote
    the saved True with False and monitoring was never restored after the
    outer __exit__. A per-thread stack of saved states fixes that while
    keeping single-level behavior identical.
    """

    def __call__(self, func):
        @wraps(func)
        def decorator(*args, **kw):
            with self:
                return func(*args, **kw)
        return decorator

    def __enter__(self):
        # Push the current state so nested scopes each restore correctly.
        stack = getattr(_locals, 'patchy_scope_stack', None)
        if stack is None:
            stack = _locals.patchy_scope_stack = []
        stack.append(this_thread_is_sql_monitoring())
        sql_unmonitoring_this_thread()

    def __exit__(self, type, value, tb):
        stack = getattr(_locals, 'patchy_scope_stack', [])
        outer_scope = stack.pop() if stack else True
        if outer_scope:
            sql_monitoring_this_thread()
no_sql_monitoring = NoSQLMonitoring()
original = CursorWrapper.execute
def long_sql_execute_wrapper(*args, **kwargs):
    """Drop-in replacement for CursorWrapper.execute that logs slow SQL.

    *args* mirrors the bound original: args[0] is the cursor wrapper,
    args[1] the SQL string, args[2] (optional) the parameters. The slowness
    threshold comes from settings.PATCHY_LONG_SQL_TIMEOUT (default 0.05 s).
    Timing runs in a finally block so even failing statements are measured.
    """
    TIMEOUT = getattr(settings, 'PATCHY_LONG_SQL_TIMEOUT', 0.05)
    try:
        start = time.time()
        result = original(*args, **kwargs)
        return result
    finally:
        end = time.time()
        duration = end - start
        # Only log when this thread has monitoring enabled.
        if duration > TIMEOUT and this_thread_is_sql_monitoring():
            try:
                message = 'SQL: (%s), Args: (%s), Execution time: %.6fs' % (args[1], args[2], duration)
            except IndexError:
                # No parameters were passed with the statement.
                message = 'SQL: (%s), Args: (), Execution time: %.6fs' % (args[1], duration)
            logger.error(message)
| {"/tests/test_middleware.py": ["/patchy/utils.py"], "/patchy/middleware.py": ["/patchy/utils.py"], "/tests/test_utils.py": ["/tests/models.py", "/patchy/utils.py"]} |
75,538 | liyong1028826685/django-patchy | refs/heads/master | /tests/models.py | from django.db import models
class OneFieldModel(models.Model):
    """Minimal model used to trigger real SQL in the utils tests."""
    char_field = models.CharField(max_length=100)
| {"/tests/test_middleware.py": ["/patchy/utils.py"], "/patchy/middleware.py": ["/patchy/utils.py"], "/tests/test_utils.py": ["/tests/models.py", "/patchy/utils.py"]} |
75,539 | liyong1028826685/django-patchy | refs/heads/master | /patchy/middleware.py | """
Custom middleware
"""
import time
import logging
import re
from django.conf import settings
from .utils import sql_unmonitoring_this_thread
logger = logging.getLogger(__name__)
class LongRequestMiddleware(object):
    """Long request middleware, remember to put it first.

    Logs any request slower than PATCHY_LONG_REQUEST_TIMEOUT and stamps the
    elapsed wall time onto the response as the X-ELAPSED header. URLs
    matching PATCHY_LONG_REQUEST_IGNORE_URLS are exempted and, when
    PATCHY_STICK_TO_LR is set, also disable SQL monitoring for the thread.

    NOTE(review): _start and url_matched live on the middleware *instance*,
    which old-style middleware shares across requests — overlapping requests
    in a threaded server could interleave them; confirm deployment model.
    """
    def __init__(self):
        """Read settings once; timeout defaults to one second."""
        self.ignore_url_patterns = getattr(settings, 'PATCHY_LONG_REQUEST_IGNORE_URLS', list())
        # skip any sql timeout monitoring if the long-request check is ignored
        self.stick_to_lr = getattr(settings, 'PATCHY_STICK_TO_LR', True)
        self.timeout = getattr(settings, 'PATCHY_LONG_REQUEST_TIMEOUT', 1)

    def process_request(self, request):
        """Record the request start time and evaluate the ignore list."""
        self._start = time.time()
        self.url_matched = False
        for url_pattern in self.ignore_url_patterns:
            # if the current path is in the ignored url list, just ignore it
            if re.match(url_pattern, request.path):
                self.url_matched = True
                if self.stick_to_lr:
                    sql_unmonitoring_this_thread()
                break

    def process_response(self, request, response):
        """Compute elapsed time, log if too long, and stamp X-ELAPSED."""
        self._end = time.time()
        elapsed = self._end - self._start
        if elapsed > self.timeout and not self.url_matched:
            # too long and log to target
            logger.error('[Long Request]Path: %s, Time: %s s' % (request.path, elapsed))
        response['X-ELAPSED'] = elapsed
        return response
| {"/tests/test_middleware.py": ["/patchy/utils.py"], "/patchy/middleware.py": ["/patchy/utils.py"], "/tests/test_utils.py": ["/tests/models.py", "/patchy/utils.py"]} |
75,540 | liyong1028826685/django-patchy | refs/heads/master | /tests/test_utils.py | from django.test import TestCase
from django.test import Client
from django.test import override_settings
from django.views.generic import View
from django.http import HttpResponse
from django.conf.urls import url
from .models import OneFieldModel
from patchy.utils import sql_monitoring_this_thread
class MockView(View):
    """Fixture view that issues one real query (a COUNT on OneFieldModel)."""
    def get(self, request):
        num = OneFieldModel.objects.count()
        return HttpResponse('The total num of records is %d.' % num)
class NoSqlMockView(View):
    """Fixture view whose get() is wrapped by the no-SQL-monitoring decorator."""
    # Imported in the class body so the decorator name is in scope below.
    from patchy.utils import no_sql_monitoring

    @no_sql_monitoring
    def get(self, request):
        return HttpResponse('hello')
# This module doubles as ROOT_URLCONF for the tests below.
urlpatterns = [
    url(r'^$', MockView.as_view()),
    url(r'^nosql/$', NoSqlMockView.as_view()),
]
@override_settings(ROOT_URLCONF='tests.test_utils')
@override_settings(PATCHY_LONG_SQL_TIMEOUT=0.000000001)
class TestLongSQL(TestCase):
    """With a near-zero timeout every statement counts as 'long SQL'."""

    def setUp(self):
        sql_monitoring_this_thread()
        self.client = Client()

    def test_sql_request(self):
        # Replace the real execute() with the monitoring wrapper, and —
        # unlike the original — restore the untouched original afterwards so
        # the monkey-patch cannot leak into unrelated tests.
        from django.db.backends import utils
        from patchy.utils import long_sql_execute_wrapper, original
        self.addCleanup(setattr, utils.CursorWrapper, 'execute', original)
        utils.CursorWrapper.execute = long_sql_execute_wrapper
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF='tests.test_utils')
@override_settings(PATCHY_LONG_SQL_TIMEOUT=0.0000000001)
class TestIgnoreSQL(TestCase):
    """A @no_sql_monitoring view must not trip the long-SQL logging."""

    def setUp(self):
        sql_monitoring_this_thread()
        self.client = Client()

    def test_no_sql_view(self):
        # Replace the real execute() with the monitoring wrapper, and —
        # unlike the original — restore the untouched original afterwards so
        # the monkey-patch cannot leak into unrelated tests.
        from django.db.backends import utils
        from patchy.utils import long_sql_execute_wrapper, original
        self.addCleanup(setattr, utils.CursorWrapper, 'execute', original)
        utils.CursorWrapper.execute = long_sql_execute_wrapper
        response = self.client.get('/nosql/')
        self.assertEqual(response.status_code, 200)
| {"/tests/test_middleware.py": ["/patchy/utils.py"], "/patchy/middleware.py": ["/patchy/utils.py"], "/tests/test_utils.py": ["/tests/models.py", "/patchy/utils.py"]} |
75,541 | maho/alcan | refs/heads/master | /main.py | import os
import time
from cymunk import BoxShape, PivotJoint, Vec2d
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.properties import NumericProperty
from kivy.uix.screenmanager import ScreenManager
import sys
from os.path import dirname
sys.path.append(dirname(__file__))
print("sys.path=", sys.path)
from alcangame import AlcanGame
from anim import AnimObject
import defs
from element import Element
from snd import load_sounds
from utils import configure_logger
class Beam(AnimObject):
    """An AnimObject whose body is pinned: zero velocity limit means physics
    impulses cannot move it."""
    def add_body(self, dt=None):
        super(Beam, self).add_body(dt=dt)
        if self.body:  # only once the body has actually been initialized
            self.body.velocity_limit = 0
class Platform(AnimObject):
    """Rectangular physics platform with a bouncy, frictional box shape."""
    angle = NumericProperty(0)

    def create_shape(self):
        """Build a box shape matching the widget's size."""
        sx, sy = self.size
        shape = BoxShape(self.body, sx, sy)
        shape.elasticity = 0.6
        shape.friction = defs.friction
        shape.collision_type = self.collision_type
        # shape.layers = defs.NORMAL_LAYER
        return shape
class AlcanSM(ScreenManager):
    """Root screen manager: switches between menu and game screens and owns
    the game-over timer.

    The per-difficulty tuning that was spread over an if/elif chain now
    lives in one data table; unknown levels still apply no settings, as
    before. ``del(game)`` was replaced with the idiomatic ``del game``.
    """

    # Difficulty presets applied onto the defs module before a game starts.
    _LEVEL_SETTINGS = {
        'easy': {
            'explode_when_nocomb': 0.9,
            'drop_useless_chance': 0.0,
            'left_beam_fine_pos': -130,
            'beam_speed': 10,
        },
        'medium': {
            'explode_when_nocomb': 0.5,
            'drop_useless_chance': 0.3,
            'left_beam_fine_pos': -10,
            'beam_speed': 20,
        },
        'hard': {
            'explode_when_nocomb': 0.01,
            'drop_useless_chance': 0.45,
            'left_beam_fine_pos': 5,
            'beam_speed': 60,
        },
    }

    def play(self, level):
        """Tune defs for the chosen difficulty and start a new game."""
        self.game_clock = None
        self.current = 'game'
        for name, value in self._LEVEL_SETTINGS.get(level, {}).items():
            setattr(defs, name, value)
        App.get_running_app().game = AlcanGame()
        self.gameuberlayout.add_widget(App.get_running_app().game)

    def schedule_gameover(self):
        """Arrange for gameover() to fire after 18 seconds."""
        self.game_clock = Clock.schedule_once(self.gameover, 18)

    def gameover(self, dt=None):
        """Tear down the running game and return to the main menu."""
        if self.game_clock:
            self.game_clock.cancel()
            self.game_clock = None
        game = self.gameuberlayout.children[0]
        self.gameuberlayout.remove_widget(game)
        game.clear()
        del game
        Element.reset()
        self.current = 'main'
class AlcanApp(App):
    """Kivy application object; builds the screen manager as the root widget."""
    def build(self):
        # Window.size = defs.map_size
        self.sm = AlcanSM()
        return self.sm

    def on_pause(self):
        # Returning True lets the app pause instead of being killed (mobile).
        Logger.info("app: on pause calledd")
        return True

    def on_resume(self):
        Logger.info("app: on resume called")
        return True

    # def on_start(self):
    #     Logger.info("app: on_start called")
    #     return True
    #
    # def on_stop(self):
    #     Logger.info("app: on_stop called")
    #     return True
if __name__ == '__main__':
    # With DEBUG set in the environment, Ctrl-C drops into the pudb debugger
    # instead of killing the process.
    if "DEBUG" in os.environ:
        def debug_signal_handler(signal, frame):
            import pudb
            pudb.set_trace()
        import signal
        signal.signal(signal.SIGINT, debug_signal_handler)
    configure_logger()
    load_sounds()
    AlcanApp().run()
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,542 | maho/alcan | refs/heads/master | /other.py | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from anim import AnimObject
import defs
class Hint(BoxLayout):
    """Static hint panel; its layout comes entirely from the kv file."""
    pass
class GameOver(AnimObject):
    """Game-over overlay object; any tap returns to the main menu."""
    def __init__(self, *a, **kw):
        super(GameOver, self).__init__(*a, **kw)
        # NOTE(review): negative layer mask — presumably keeps this object out
        # of the carried-things collision layer; confirm against defs usage.
        self.layers = -1 - defs.CARRIED_THINGS_LAYER

    def on_touch_up(self, touch):
        App.get_running_app().root.gameover()
class Success(BoxLayout):
    """Level-complete overlay; any tap returns to the main menu."""
    def on_touch_up(self, touch):
        App.get_running_app().root.gameover()
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,543 | maho/alcan | refs/heads/master | /utils.py | import logging
from random import sample
# from logging.handlers import DatagramHandler
# from logging.handlers import SysLogHandler
import weakref
from kivy.logger import Logger
import defs
def adhoco(**kwargs):
    """Create a throwaway class named 'adhoc_object' whose class attributes
    are exactly the given keyword arguments."""
    attributes = dict(kwargs)
    return type('adhoc_object', (object,), attributes)
def shuffled(container):
    """Return a new list holding the container's items in random order."""
    items = list(container)
    return sample(items, k=len(items))
# def observe(obj):
# try:
# observe.objs.append((weakref.ref(obj), str(obj)))
# except TypeError:
# Logger.warning("unable to observe %s"%obj)
# return obj
# observe.objs = []
# def report():
# for o, d in observe.objs:
# if o() is not None:
# Logger.info("(%s) is dead"%d)
# else:
# Logger.info("(%s) is still alive"%d)
def configure_logger():
    """Placeholder: remote syslog/datagram logging is currently disabled."""
    pass
    # # h = SysLogHandler(address=defs.syslog_host)
    # h = DatagramHandler(*defs.syslog_host)
    # h.setLevel(logging.DEBUG)
    #
    # rlogg = logging.getLogger()
    # rlogg.addHandler(h)
    # rlogg.setLevel(logging.DEBUG)
    #
    # logging.info("das ist info")
    # logging.debug("eta diebug")
    #
    # Logger.info("a eta kivy's info")
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,544 | maho/alcan | refs/heads/master | /cannon.py | from math import radians
from cymunk import Vec2d
from kivy.properties import NumericProperty, ObjectProperty
from anim import AnimObject
import defs
class Cannon(AnimObject):
    """Cannon that captures elements, holds them jointed at its muzzle, and
    fires them along its aim angle.

    The carry/shoot sequencing (unjoint -> reposition -> rejoint; unjoint ->
    impulse -> layer switch -> activate) is order-sensitive physics code —
    documented here, deliberately left untouched.
    """
    collision_type = 2
    aim = NumericProperty(0)
    # Offset of the carry pivot relative to the cannon body's position.
    offset = ObjectProperty((0, 0))

    def __init__(self, *args, **kwargs):
        super(Cannon, self).__init__(*args, **kwargs)
        self.layers = defs.CARRIED_THINGS_LAYER
        # Elements currently loaded and awaiting shoot().
        self.bullets = []

    def create_shape(self):
        """ make cannon a sensor """
        # Sensor shapes detect collisions without producing physical response.
        shape = super(Cannon, self).create_shape()
        shape.sensor = True
        return shape

    def carry_element(self, element, __dt=None):
        """Detach the element from whatever held it and joint it to the cannon."""
        # unbind joint from element
        element.unjoint()
        # move it to center of cannon
        pivot = self.body.position + Vec2d(self.offset)
        element.body.position = pivot
        element.joint(self, pivot)
        self.bullets.append(element)

    def shoot(self):
        """Fire all loaded bullets along the aim angle.

        Returns False when nothing is loaded, True after firing.
        """
        if not self.bullets:
            return False
        impulse = Vec2d(0, defs.shoot_force)
        impulse.rotate(radians(self.aim))
        for x in self.bullets:
            x.unjoint()
            x.body.apply_impulse(impulse)
            # Shot elements move to the layer where they can hit targets.
            x.shape.layers = defs.SHOOTED_THINGS_LAYER
            x.activate()
        self.bullets = []
        return True
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,545 | maho/alcan | refs/heads/master | /anim.py | from math import degrees
from cymunk import Body, Circle, Space, Segment, Vec2d
from kivy.clock import Clock
from kivy.logger import Logger
from kivy.properties import NumericProperty
from kivy.uix.widget import Widget
import defs
class PhysicsObject(object):
    """ super object, which holds physics in class attributes """
    # Shared cymunk Space and the body->widget registry live on the CLASS,
    # so every subclass instance sees the same simulation.
    space = None
    bodyobjects = {}
    mass = NumericProperty(10, allownone=True)
    moment_of_inertia = NumericProperty('INF', allownone=True)
    friction = NumericProperty(defs.friction)

    def __init__(self):
        # lazily create the shared space on first instantiation
        if self.space is None:
            self.init_physics()

    @staticmethod
    def init_physics():
        """ instead of using space as global variable """
        cls = PhysicsObject
        cls.space = Space()
        cls.space.gravity = defs.gravity
        # thick (radius `ra`) static segments fencing the map: floor + both sides
        ra = 100
        w, h = defs.map_size
        for x1, y1, x2, y2, ct in [
                (-100, defs.floor_level - ra, w + 100, defs.floor_level - ra, defs.BOTTOM_BOUND),
                (-ra, h + 100, -ra, -100, defs.LEFT_BOUND),
                (w + ra, h + 100, w + ra, -100, defs.RIGHT_BOUND)]:
            wall = Segment(cls.space.static_body, Vec2d(x1, y1), Vec2d(x2, y2), ra)
            wall.elasticity = 0.6
            wall.friction = defs.friction
            wall.collision_type = ct
            cls.space.add_static(wall)

    @staticmethod
    def del_physics():
        # drop the class attribute, then reset it so init_physics() runs again
        cls = PhysicsObject
        del(cls.space)
        cls.space = None

    @classmethod
    def update_space(cls):
        # advance the simulation one fixed-size step, then sync widgets
        cls.space.step(1.0/20.0)
        for __b, o in cls.bodyobjects.items():
            o.update_to_body()

    def add_to_space(self, __body, space):
        # NOTE(review): both parameters are ignored -- `space` is immediately
        # rebound to the shared class space; callers actually pass (body, shape).
        space = self.space
        if self.mass is not None:
            # only dynamic bodies join the space; massless bodies stay "rogue"
            space.add(self.body)
        space.add(self.shape)
        self.bodyobjects[self.body] = self
        self.on_body_init()

    def update_to_body(self):
        """
        update widget position to body position
        """
        p = self.body.position
        self.center = tuple(p)
        # rotate the widget too, if it exposes an `angle` property
        if hasattr(self, 'angle'):
            ang = degrees(self.body.angle)
            self.angle = ang

    def on_body_init(self):
        """ called when body is finally set up """
        pass
class ClockStopper(Widget):
    """ class which holds all scheduled clocks and allows to stop them all
        useful if it's eg. game over """

    # every scheduled event, shared across all instances
    clocks = []

    def __init__(self, *args, **kwargs):
        self.on_init_called = False
        super(ClockStopper, self).__init__(*args, **kwargs)
        self.wait_for_parent()

    def wait_for_parent(self, dt=None):
        """Poll every frame until the widget has a parent, then run on_init once."""
        if not self.parent or self.on_init_called:
            self.schedule_once(self.wait_for_parent)
            return
        self.on_init()
        self.on_init_called = True

    def on_init(self):
        pass

    @classmethod
    def schedule_once(cls, *args, **kwargs):
        cls.clocks.append(Clock.schedule_once(*args, **kwargs))
        cls.clocks_cleanup()

    @classmethod
    def schedule_interval(cls, *args, **kwargs):
        cls.clocks.append(Clock.schedule_interval(*args, **kwargs))
        cls.clocks_cleanup()

    @classmethod
    def stop_all_clocks(cls):
        """Cancel every pending event (e.g. on game over)."""
        for pending in cls.clocks:
            pending.cancel()
        cls.clocks_cleanup()

    @classmethod
    def clocks_cleanup(cls):
        # keep only events that are still scheduled to fire
        cls.clocks[:] = [ev for ev in cls.clocks if ev.is_triggered]
class AnimObject(ClockStopper, PhysicsObject):
    """ base object for all animated objects in game """

    collision_type = NumericProperty(0)

    def __init__(self, *args, **kwargs):
        super(AnimObject, self).__init__(*args, **kwargs)
        self.body = None
        self.layers = None
        self.add_body()

    def add_body(self, dt=None):
        """Create the physics body and shape once the widget has a parent."""
        if not self.parent:  # object not initialized yet
            # call myself in next frame,
            self.schedule_once(self.add_body)
            return
        if self.mass is None:
            # BUGFIX: original read `self.moment_of_inertia is None` -- a no-op
            # comparison. The intent is that a massless (static) object also
            # gets a None moment, so Body(None, None) builds a non-dynamic body.
            self.moment_of_inertia = None
        self.body = Body(self.mass, self.moment_of_inertia)
        self.body.position = self.center
        self.shape = self.create_shape()
        self.add_to_space(self.body, self.shape)
        # (removed two dead `self.body` / `self.shape` expression statements)

    def create_shape(self):
        """Build a circular shape approximating the widget's size."""
        sx, sy = self.size
        radius = (sx + sy)/4  # half of avg
        shape = Circle(self.body, radius)
        shape.elasticity = 0.6
        shape.friction = self.friction
        shape.collision_type = self.collision_type
        if self.layers:
            shape.layers = self.layers
        return shape

    def before_removing(self):
        """ method called before removing by parent """
        pass

    def show_baloon(self, text, **kwargs):
        """Pop a speech baloon above this object (clamped to y<=400)."""
        from baloon import Baloon
        px, py = self.pos
        if py > 400:
            py = 400
        else:
            py += 200
        if self.parent:  # if not have parent, then maybe it doesn't need baloon?
            self.parent.add_widget(
                Baloon(self, (px, py), text, **kwargs)
            )

    def update(self, dt):
        pass
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,546 | maho/alcan | refs/heads/master | /ui.py | from kivy.clock import Clock
from kivy.logger import Logger
from kivy.uix.button import Button
def opposite(x, y):
    """True when x and y have strictly opposite signs (zero counts as neither)."""
    return (x < 0 < y) or (y < 0 < x)
class IntroLabel(Button):
    """Button that auto-fits its font size so the texture fills the widget."""

    def __init__(self, *a, **kw):
        super().__init__(*a, **kw)

    def on_size(self, label, size):
        # re-fit the font on the next frame whenever the widget is resized
        Clock.schedule_once(self.adjust_size, 0)

    def adjust_size(self, dt):
        """Fit the font by stepping its size, halving and flipping on overshoot."""
        delta = 3
        while True:
            w, h = self.size
            tw, th = self.texture_size
            Logger.debug("tw=%s w=%s th=%s h=%s step=%s", tw, w, th, h, delta)
            direction = -1 if (tw > w or th > h) else 1
            if opposite(direction, delta):
                # we overshot: reverse and shrink the step
                delta *= -0.5
                Logger.debug("step set to %s", delta)
            if abs(delta) < 0.1:
                Logger.debug("reached finish")
                break
            self.font_size += delta
            Logger.debug("font_size=%s", self.font_size)
            self.texture_update()
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,547 | maho/alcan | refs/heads/master | /snd.py | from kivy.core.audio import SoundLoader
class Sounds:
    """Namespace holding loaded sound effects as class attributes."""
    # Set by load_sounds(); stays None until sounds are loaded.
    merge = None
def load_sounds():
    """Load all sound effects into the Sounds namespace (call once at startup)."""
    Sounds.merge = SoundLoader.load("sfx/merge.ogg")
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,548 | maho/alcan | refs/heads/master | /wizard.py | import time
from cymunk import Vec2d
from kivy.logger import Logger
from anim import AnimObject
import defs
class Wizard(AnimObject):
    """The alchemist: walks around, picks up elements and can drop them."""

    collision_type = 3

    def __init__(self, *a, **kw):
        super(Wizard, self).__init__(*a, mass=defs.wizard_mass, **kw)
        self.layers = defs.NORMAL_LAYER
        self.carried_elements = []
        self.touching_elements = []
        self.applied_force = Vec2d(0, 0)

    def carry_element(self, element, __dt=None):
        """Attach *element* to the wizard's hand, unless it was just released."""
        # a freshly released element gets a 1 s grace period before re-pickup
        if time.time() - element.released_at < 1.0:
            return True
        # move element to "carried elements layer"
        element.shape.layers = defs.CARRIED_THINGS_LAYER
        # snap the element to the hand position and pin it there
        hand = self.body.position + Vec2d(defs.wizard_hand)
        element.body.position = hand
        element.joint(self, hand)
        self.carried_elements.append(element)
        element.wizard = self

    def add_body(self, dt=None):
        super(Wizard, self).add_body(dt=dt)
        if self.body:  # body exists once the widget got a parent
            self.body.velocity_limit = defs.wizard_max_speed

    def create_shape(self):
        shape = super(Wizard, self).create_shape()
        shape.friction = defs.wizard_friction
        return shape

    def release_element(self):
        """Drop everything carried; returns False when hands were empty."""
        if not self.carried_elements:
            return False
        # unjoint() mutates carried_elements, so iterate over a copy
        for carried in tuple(self.carried_elements):
            carried.body.apply_impulse(defs.wizard_release_impulse)
            carried.unjoint()
            carried.shape.layers = defs.NORMAL_LAYER
            carried.released_at = time.time()
        if self.touching_elements:
            self.carry_element(self.touching_elements[0])
        return True
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,549 | maho/alcan | refs/heads/master | /element.py | """ element (elementary ingredients of matter) and managing it """
from functools import partial
from random import choice, random, sample
import re
import os
from cymunk import PivotJoint
from kivy.logger import Logger
from kivy.properties import BooleanProperty, NumericProperty
from kivy.vector import Vector
from anim import AnimObject
import bfs
import defs
from utils import shuffled
from snd import Sounds
def load_elmap():
    """Parse data/elmap.txt ("c = a + b" recipes) into {(a, b): c}, memoized.

    Keys are sorted 2-tuples of substrate names; duplicate recipes assert.
    """
    if load_elmap.data:
        return load_elmap.data
    fname = "data/elmap-DEBUG.txt" if "DEBUG" in os.environ else "data/elmap.txt"
    with open(fname) as f:
        for line in f:
            g = re.match(r"^(.*)=(.*)\+(.*)$", line)
            if not g:
                continue
            product, left, right = (part.strip() for part in g.groups())
            key = tuple(sorted([left, right]))
            assert key not in load_elmap.data, "duplicate combination %s" % key
            load_elmap.data[key] = product
    return load_elmap.data
# memo cache as a function attribute
load_elmap.data = {}
def combine_elements(a, b):
    """Return the product of combining *a* and *b*, or None when no recipe exists."""
    # the element map is keyed by the sorted substrate pair
    return load_elmap().get(tuple(sorted((a, b))))
class Explosion(AnimObject):
    """Five-frame shrinking explosion effect; removes itself when finished."""

    frame = NumericProperty(1)

    def __init__(self, momentum=None, *a, **kw):
        # `momentum` is accepted (replace_objs passes it) but intentionally unused
        super(Explosion, self).__init__(*a, **kw)
        self.layers = defs.VISUAL_EFFECTS_LAYER

    def update(self, dt):
        if self.parent is None:
            Logger.info("self=%s self.parent=%s self.frame=%s but parent is None",
                        self, self.parent, self.frame)
            return
        if self.frame > 5:
            self.parent.remove_obj(self)
            return
        self.frame += 1
        # dirty hack, how to do it otherwise?
        prev = sum(self.size)/2
        target = (100*(5 - self.frame) + 18*(self.frame-1))/4
        shrink = prev - target
        self.size = (target, target)
        self.pos = Vector(self.pos) + (shrink/2, shrink/2)
class Element(AnimObject):
    """ element object (water, fire ....) """
    collision_type = 1
    # is activated when shooted, and then it combine with other element
    activated = BooleanProperty(False)
    # class-level game state shared by all instances:
    # all element names discovered so far this game
    available_elnames = {'water', 'air', 'earth', 'fire'}
    # names of elements currently alive in the game
    present_elnames = []
    # element names whose intro baloon has already been shown once
    shown_baloons = set()

    def __init__(self, elname, activate=False, momentum=None, *a, **kw):
        """
        momentum - that linear one, mass*V
        """
        Logger.debug("new element kwargs=%s, momentum=%s", kw, momentum)
        self.elname = elname
        super(Element, self).__init__(*a, **kw)
        self.imgsrc = "img/" + elname + ".png"
        self.layers = defs.NORMAL_LAYER
        self.wizard = None  # who carry element?
        self.joint_in_use = None
        # time.time() of last release by the wizard; -1 means never released
        self.released_at = -1
        self.momentum = momentum
        if activate:
            self.activate()
        self.present_elnames.append(elname)

    def __repr__(self):
        return "[E:%s id=%s]" % (self.elname, id(self))

    def on_init(self):
        # widget got a parent (see ClockStopper) -- register in the drop zone
        self.parent.elements_in_zone.append(self)

    def on_body_init(self):
        assert self.parent is not None
        # announce this element's name once per game
        if self.elname not in self.shown_baloons:
            self.shown_baloons.add(self.elname)
            self.show_baloon(self.elname)
        self.parent.set_bfs()
        if self.momentum:
            # inherit linear momentum from the objects this element replaced
            self.body.velocity = self.momentum / self.body.mass

    def before_removing(self):
        self.present_elnames.remove(self.elname)

    def activate(self, __dt=None, timeout=0.3):
        """ make it green and ready to react with other element """
        if timeout == 'now':
            self.activated = True
            self.shape.layers = defs.NORMAL_LAYER
            if 'activation' not in self.shown_baloons:
                self.shown_baloons.add('activation')
                self.show_baloon('activated \nready to reaction', size=(150, 80))
            return
        # delay actual activation so a freshly shot element doesn't react instantly
        self.schedule_once(partial(self.activate, timeout='now'), timeout)

    def joint(self, with_who, point):
        # pin this element's body to another object's body at `point`
        self.unjoint()
        self.joint_in_use = PivotJoint(self.body, with_who.body, point)
        self.space.add(self.joint_in_use)

    def unjoint(self):
        """ remove existing joint """
        if not self.joint_in_use:
            return
        joint = self.joint_in_use
        self.joint_in_use = None
        self.space.remove(joint)
        # also detach from the wizard's carry list, if carried
        if self.wizard:
            self.wizard.carried_elements.remove(self)
            self.wizard = None

    def collide_with_another(self, element, __dt=None):
        """ collide with another element. Generate combination, or explosion
            or bounce (return True)
        """
        # returns points earned (+5 combine / -1 explode) or None to just bounce
        if not element.activated or not self.activated:
            return None
        Logger.debug("collision: %s vs %s (%s vs %s)", self.elname, element.elname, self, element)
        if self.parent is None:
            Logger.debug("hey, my parent is still none, (and me=%s)", self)
            return
        new_elname = combine_elements(self.elname, element.elname)
        if new_elname is None:
            # no recipe for this pair -- maybe blow both up
            if random() < defs.explode_when_nocomb:
                self.parent.replace_objs([self, element], Explosion, center=self.center)
                self.parent.elements_in_zone.remove(element)
                self.parent.elements_in_zone.remove(self)
                return -1
            return None
        # a valid recipe: record the discovery and spawn the product
        self.available_elnames.add(new_elname)
        self.parent.reached_elname(new_elname)
        self.parent.replace_objs([self, element], Element, new_elname, activate=True)
        self.parent.elements_in_zone.remove(element)
        self.parent.elements_in_zone.remove(self)
        self.parent.rotate_hint()
        Sounds.merge.play()
        return +5

    @classmethod
    def random(cls, elizo):
        """ generate random element from available.
            Generate useful element, depending on drop_useless_chance
            ` elizo - elements in zone, list of Element instances
        """
        Logger.debug("Element.random: elizo=%s available_elnames=%s", elizo, cls.available_elnames)
        all_elnames = [x.elname for x in elizo]
        green_elnames = [x.elname for x in elizo if x.activated]
        white_elnames = [x.elname for x in elizo if not x.activated]
        # first - check if we can just drop enything
        if random() < defs.drop_useless_chance:
            elname = choice(list(cls.available_elnames))
            Logger.debug("elements: appear pure random element")
            return Element(elname)
        Logger.debug("second")
        # try to drop element E (which is not in zone) which combined with GREEN elements in zone will give
        # element R which is new
        for x in shuffled(set(cls.available_elnames) - set(white_elnames)):
            # iterate over all availables except those which lay just by wizard
            # (to not duplicate them)
            if cls.is_useful(x, with_elnames=green_elnames, avoid=cls.available_elnames):
                return Element(elname=x)
        Logger.debug("third")
        # try to drop element E (which is not in zone) which combined with ANY elements in zone will give
        # element R which is new
        for x in shuffled(set(cls.available_elnames) - set(white_elnames)):
            # iterate over all availables except those which lay just by wizard
            if cls.is_useful(x, with_elnames=all_elnames, avoid=cls.available_elnames):
                return Element(elname=x)
        # Nothing useful, drop random
        ret = choice(list(cls.available_elnames))
        Logger.debug("fourth nothing useful, drop pure random(%s)", ret)
        return Element(ret)

    @classmethod
    def steps_to_reach(cls):
        """ how many inventions neccessary to reach dragon """
        Logger.debug("steps_to_reach(): cls.available_elnames=%s, dest='dragon'", cls.available_elnames)
        ret = bfs.bfs(cls.available_elnames, 'dragon')
        Logger.debug("returned %s", ret)
        return ret

    @classmethod
    def is_useful(cls, elname, with_elnames, avoid):
        """ combine elname with each of with_elnames and check if it can
            bring something new to elnames, but source element should not belong to 'avoid' """
        Logger.debug("is_useful elname=%s with=%s avoid=%s", elname, with_elnames, avoid)
        for x in with_elnames:
            result = combine_elements(x, elname)
            if not result:
                continue
            if result in avoid:  # don't produce something we already know
                continue
            Logger.debug("USEFUL: elname=%s, with_elnames=%s avoid=%s: %s + %s gives %s", elname, with_elnames, avoid, elname, x, result)
            return True
        return False

    @classmethod
    def reset(cls):
        # restore class-level game state for a fresh game
        cls.available_elnames = {'water', 'air', 'earth', 'fire'}
        cls.present_elnames = []
        cls.shown_baloons = set()
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,550 | maho/alcan | refs/heads/master | /bfs.py | from collections import defaultdict
from pprint import pprint
from random import choice
def reverse_elmap():
    """Invert the element map: product name -> list of (a, b) substrate pairs.

    Returns a defaultdict(list) so unknown products yield an empty list.
    """
    from element import load_elmap
    inverted = defaultdict(list)
    for substrates, product in load_elmap().items():
        inverted[product].append(substrates)
    return inverted
def bfs(known, end='dragon', pamle=None):
    """
    return list of steps (a,b)->c

    Recursively expand the shortest recipe chain that produces *end*
    from the *known* element names. `pamle` is the reversed element map
    (product -> substrate pairs); it is built on demand when omitted.
    """
    assert isinstance(known, set)
    if not pamle:
        pamle = reverse_elmap()
    known = known.copy()
    recipes = pamle[end]
    if not recipes:
        # base element (or unknown product): nothing to combine
        return []
    candidates = []
    for a, b in recipes:
        plan = [(a, b)]
        if not (a in known and b in known):
            # prepend the sub-plans needed to obtain each substrate
            for ingredient in (a, b):
                plan = bfs(known.copy(), ingredient, pamle=pamle) + plan
        candidates.append(plan)
    # shortest plan wins; ties resolve to the first recipe, as before
    return min(candidates, key=len)
if __name__ == '__main__':
    # Manual smoke test: print the step plan from the four base elements to 'dragon'.
    # import timeit
    # print(timeit.timeit(lambda: bfs({'water', 'fire', 'air', 'earth'}, 'dragon'), number=1000))
    pprint(bfs({'water', 'fire', 'air', 'earth'}, 'dragon'))
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,551 | maho/alcan | refs/heads/master | /defs.py | """ definitions/constants """
# (host, port) for the disabled remote-syslog handler (see utils.configure_logger)
syslog_host = ("dlaptop", 5555)
fps = 20  # game-update ticks per second
gravity = (0, -750)  # cymunk space gravity vector
baloon_force = (300, 7900)  # constant force applied to speech baloons
max_hints = 3  # maximum number of recipe hints visible at once
friction = 0.55  # default shape friction
floor_level = -5  # y-coordinate of the floor wall (minus wall radius)
kill_level = -1000
wizard_mass = 200
wizard_touch_impulse_x = 1000  # horizontal impulse multiplier when moving the wizard
wizard_hand = (40, -20) # offset from center, where wizard carry things
wizard_max_speed = 400  # wizard body velocity limit
wizard_friction = 4 * 9 / 13
wizard_release_impulse = (-100, 2200)  # impulse given to a dropped element
shoot_force = 7700  # magnitude of the cannon's firing impulse
mintouchdist = 30  # min drag distance before a touch counts as sweep/aim
map_size = (1280, 720)  # logical map size in pixels
skip_drop_time = 3 # how much time to skip dropping new elements, before we know what is result of shot
drop_useless_chance = 0.000  # chance to drop a purely random element
drop_chance = 0.005  # per-tick chance to drop an extra element
drop_zone = (0, 400)  # x-range where new elements fall from the sky
num_elements_in_zone = (2, 8)  # (min, max) of inactive elements kept in the zone
explode_when_nocomb = False # explode elements when they make impossible combination
left_beam_fine_pos = 0
beam_speed = 20 # number of pixels per minute
# constants
# cymunk shape layer bitmasks
NORMAL_LAYER = 1
CARRIED_THINGS_LAYER = (1 << 1)
SHOOTED_THINGS_LAYER = (1 << 2)
VISUAL_EFFECTS_LAYER = (1 << 3)
PLATFORMS_LAYER = (1 << 4)
# collision types of the static boundary walls
LEFT_BOUND = 1001
RIGHT_BOUND = 1002
BOTTOM_BOUND = 1003
INTRO_TEXT = """
Long time ago, in the middle ages...
The Swiss Alchemists obsessed with finding way to invent way to breed dragon, bring into life theories about transmutation elements into another.
Because of lack of philosopher's stone, the Swiss Alchemists has secretly begun construction of Large Element Collider, the powerful and complex experimental facility.
Now L.E.C. is constructed, help them to breed dragon from base elements.
"""
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,552 | maho/alcan | refs/heads/master | /baloon.py | from cymunk import DampedSpring
from kivy.clock import Clock
from kivy.properties import ListProperty, StringProperty
from anim import AnimObject
import defs
class Baloon(AnimObject):
    """Speech-bubble widget spring-tethered to another object; despawns after 5 s."""

    anchor = ListProperty([0, 0])
    text = StringProperty("...")

    def __init__(self, object_to_follow, center, text, size=(100, 50)):
        super(Baloon, self).__init__(center=center, size=size)
        self.object_to_follow = object_to_follow
        self.anchor = self.object_to_follow.center
        self.layers = defs.VISUAL_EFFECTS_LAYER
        self.text = text
        # BUGFIX: self.joint was only created in add_body(); if remove() fired
        # before the body/joint existed it raised AttributeError. Start as None
        # so remove() takes its early-return path instead.
        self.joint = None
        self.schedule_once(self.remove, 5)

    def add_body(self, dt=None):
        super(Baloon, self).add_body(dt=dt)
        if self.body:  # if obj is initialized yet
            # keep the baloon floating up against gravity
            self.body.apply_force(defs.baloon_force)
            self.joint = DampedSpring(self.body, self.object_to_follow.body,
                                      tuple(self.pos),
                                      tuple(self.object_to_follow.center),
                                      130, 1.9, 1.5)
            self.space.add(self.joint)

    def update(self, dt):
        # keep the pointer anchored to the followed object
        self.anchor = self.object_to_follow.center

    def remove(self, dt=None):
        """ remove existing joint """
        if not self.joint:
            return
        joint = self.joint
        self.joint = None  # guards against double removal
        self.space.remove(joint)
        del(joint)
        self.parent.remove_obj(self)
class PointsBaloon(AnimObject):
    """Short-lived floating widget showing points just earned; despawns after 5 s."""

    def __init__(self, center, points):
        super().__init__(center=center)
        self.layers = defs.VISUAL_EFFECTS_LAYER
        self.points = points
        self.schedule_once(self.remove, 5)

    def add_body(self, dt=None):
        super().add_body()
        if self.body:
            # push the baloon upward against gravity
            self.body.apply_force(defs.baloon_force)

    def remove(self, dt=None):
        self.parent.remove_obj(self)
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,553 | maho/alcan | refs/heads/master | /alcangame.py | from functools import partial
from collections import OrderedDict, defaultdict
import random
import time
from cymunk import Vec2d
from kivy.app import App
from kivy.base import EventLoop
from kivy.clock import Clock
from kivy.core.window import Keyboard, Window
from kivy.logger import Logger
from kivy.properties import NumericProperty, ObjectProperty
from anim import AnimObject, ClockStopper, PhysicsObject
from baloon import Baloon, PointsBaloon
from cannon import Cannon
import defs
from element import Element, load_elmap
from ui import IntroLabel
from wizard import Wizard
from other import GameOver, Hint, Success
from snd import Sounds
from utils import adhoco
class AlcanGame(ClockStopper, PhysicsObject):
bfs = NumericProperty('inf')
scale = NumericProperty(1.0)
points = NumericProperty(0)
stacklayout = ObjectProperty()
def __init__(self, *args, **kwargs):
super(AlcanGame, self).__init__(*args, **kwargs)
self.oo_to = adhoco(remove=set(), add=[])
self.elements_in_zone = []
self.keys_pressed = set()
self.game_is_over = False
self.visible_hints = OrderedDict()
self.hints_stats = defaultdict(lambda: 0)
self.skip_drop = False
self.touch_phase = None
self.left_beam_time = time.time()
EventLoop.window.bind(on_key_down=self.on_key_down, on_key_up=self.on_key_up)
self.schedule_interval(self.update, 1.0 / defs.fps)
# collision handlers
self.space.add_collision_handler(Wizard.collision_type,
Element.collision_type,
self.wizard_vs_element,
separate=self.wizard_vs_element_end)
self.space.add_collision_handler(Element.collision_type,
Cannon.collision_type,
self.cannon_vs_element)
self.space.add_collision_handler(Element.collision_type,
Element.collision_type,
self.element_vs_element)
self.space.add_collision_handler(Element.collision_type,
defs.BOTTOM_BOUND,
self.element_vs_bottom)
self.space.add_collision_handler(Wizard.collision_type,
defs.BOTTOM_BOUND,
self.wizard_vs_bottom)
Window.bind(on_resize=self.on_resize)
self.set_bfs()
self.trigger_resize()
def clear(self):
self.stop_all_clocks()
EventLoop.window.funbind('on_key_down', self.on_key_down)
EventLoop.window.funbind('on_key_up', self.on_key_up)
for x in self.children[:]:
if isinstance(x, AnimObject):
self.remove_widget(x)
self.del_physics()
def gameover(self):
if self.game_is_over:
return
self.game_is_over = True
__mw, mh = defs.map_size
self.add_widget(GameOver(pos=(400, mh), size=(600, 150)))
App.get_running_app().sm.schedule_gameover()
def on_init(self):
self.add_widget(Baloon(center=(300, 300), object_to_follow=self.wizard,
text="Alchemist"))
self.schedule_once(lambda dt: self.add_widget(Baloon(center=(400, 300), size=(200, 50),
object_to_follow=self.cannon,
text="Large Elements Collider")), 3)
def schedule_add_widget(self, oclass, *oargs, **okwargs):
self.oo_to.add.append((oclass, oargs, okwargs))
def set_bfs(self):
self.hints_to_show = Element.steps_to_reach()
self.bfs = len(self.hints_to_show)
def set_hint(self, a, b, c):
if (a, b) in self.visible_hints:
return False
hint = Hint()
self.stacklayout.add_widget(hint)
self.visible_hints[a, b] = hint
self.hints_stats[a, b] += 1
hint.a = a
hint.b = b
hint.c = c
if len(self.visible_hints) > defs.max_hints:
(a, b), hint = self.visible_hints.popitem(0)
self.stacklayout.remove_widget(hint)
return True
def rotate_hint(self):
""" calculate hint for new element appeared """
available_elements = Element.available_elnames
possible_combinations = []
elmap = load_elmap()
for (a, b) in self.hints_to_show:
c = elmap[a, b]
possible_combinations.append((a, b, c))
possible_combinations.sort(key=lambda x: self.hints_stats[x[0], x[1]])
for a, b, c in possible_combinations:
if self.set_hint(a, b, c):
break
def remove_obj(self, obj, __dt=None, just_schedule=True):
if just_schedule:
self.oo_to.remove.add(obj)
return
Logger.info("game: remove object obj=%s", obj)
obj.before_removing()
self.space.remove(obj.body)
self.space.remove(obj.shape)
self.remove_widget(obj)
del self.bodyobjects[obj.body]
def replace_objs(self, As, BClass, *Bargs, **Bkwargs):
massum = 0.0
momentum = Vec2d(0, 0)
for x in As:
massum += x.body.mass
momentum += x.body.velocity * x.body.mass
Logger.debug("momentum is %s after adding mass=%s vel=%s", momentum, x.body.velocity, x.body.mass)
self.remove_obj(x)
Bkwargs['pos'] = As[0].pos
Bkwargs['size'] = As[0].size
Bkwargs['momentum'] = momentum / len(As) # I have no idea why I should divide it by number of As.
# Afair it should work well without dividing,
self.schedule_add_widget(BClass, *Bargs, **Bkwargs)
def wizard_vs_element(self, __space, arbiter):
""" collision handler - wizard vs element """
wizard, element = [self.bodyobjects[s.body] for s in arbiter.shapes]
if isinstance(wizard, Element):
wizard, element = element, wizard
wizard.touching_elements.append(element)
if wizard.carried_elements:
return True
self.schedule_once(partial(wizard.carry_element, element))
def wizard_vs_element_end(self, __space, arbiter):
wizard, element = [self.bodyobjects[s.body] for s in arbiter.shapes]
if isinstance(wizard, Element):
wizard, element = element, wizard
wizard.touching_elements.remove(element)
if not wizard.carried_elements and wizard.touching_elements:
self.schedule_once(partial(wizard.carry_element, wizard.touching_elements[0]))
def cannon_vs_element(self, __space, arbiter):
cannon, element = [self.bodyobjects[s.body] for s in arbiter.shapes]
if isinstance(cannon, Element):
cannon, element = element, cannon
if cannon.bullets:
return True # cannot hold more than one bullet
self.schedule_once(partial(cannon.carry_element, element))
def element_vs_bottom(self, __space, arbiter):
e, bo = arbiter.shapes
if e.collision_type == defs.BOTTOM_BOUND:
e, bo = bo, e
e = self.bodyobjects[e.body]
if e.activated:
self.gameover()
self.remove_obj(e)
self.elements_in_zone.remove(e)
def element_vs_element(self, __space, arbiter):
e1, e2 = [self.bodyobjects[s.body] for s in arbiter.shapes]
# Clock.schedule_once(partial(e1.collide_with_another,e2))
retpoints = e1.collide_with_another(e2)
if retpoints:
x, y = e1.center
self.add_widget(PointsBaloon((x, y + 30), retpoints))
self.points += retpoints
return True
def wizard_vs_bottom(self, __space, arbiter):
wiz, bo = arbiter.shapes
if wiz.collision_type == defs.BOTTOM_BOUND:
wiz, bo = bo, wiz
self.gameover()
def on_key_up(self, __window, key, *__largs, **__kwargs):
code = Keyboard.keycode_to_string(None, key)
self.keys_pressed.remove(code)
def drop_carried_element(self):
self.wizard.release_element()
def shoot(self, drop=False):
if self.cannon.shoot():
self.skip_drop = True
Clock.schedule_once(lambda dt: setattr(self, 'skip_drop', False), defs.skip_drop_time)
elif drop:
self.wizard.release_element()
def on_key_down(self, window, key, *largs, **kwargs):
# very dirty hack, but: we don't have any instance of keyboard anywhere, and
# keycode_to_string should be in fact classmethod, so passing None as self is safe
code = Keyboard.keycode_to_string(None, key)
self.keys_pressed.add(code)
if code == 'spacebar':
self.shoot(drop=True)
def on_touch_down(self, touch):
touch.push()
self.current_touch = adhoco(x=touch.x, y=touch.y)
# quick and dirty, roughly like in Scatter but way simplier
touch.apply_transform_2d(lambda x,y: (x/self.scale, y/self.scale))
try:
if super().on_touch_down(touch):
return True
finally:
touch.pop()
def on_touch_move(self, touch):
if touch.is_double_tap:
return False
if not self.current_touch:
return False
# dx to current touch(start)
cdx, cdy = touch.x - self.current_touch.x, touch.y - self.current_touch.y
Logger.debug("cdx, cdy = %s, %s", cdx, cdy)
dx, dy = touch.dx, touch.dy
ix = defs.wizard_touch_impulse_x
# check if we didn't start some action in UI. Eg when user started to move wizard, it's unconvenient for him to
# aim in the same time, so we need minimum time until we allow him to do different thing
if self.touch_phase is None and abs(cdx) + abs(cdy) > defs.mintouchdist:
self.touch_phase = 'sweep' if abs(cdx) > abs(cdy) else 'aim'
Logger.debug("set self touch phase to %s", self.touch_phase)
if self.touch_phase == 'sweep':
self.wizard.body.apply_impulse((ix * dx, 0))
elif self.touch_phase == 'aim':
self.cannon.aim += dy / 2
return False
def on_touch_up(self, touch):
    """Reset all per-gesture input state once the touch ends."""
    self.keys_pressed.clear()
    self.current_touch = None
    self.touch_phase = None
    Logger.debug("on_touch_up ... touch_phase=None")
    return super().on_touch_up(touch)
def trigger_resize(self):
    """Re-run the resize handler against the current window size."""
    width, height = Window.size
    self.on_resize(None, width, height)
def on_resize(self, __win, w, h):
    """Recompute the scene scale so the whole map fits a w x h window."""
    map_w, map_h = defs.map_size
    # Use the tighter of the two ratios to keep the aspect ratio intact.
    self.scale = min(w / map_w, h / map_h)
def update(self, dt):
    """Per-frame game tick: step physics, spawn elements, update children,
    flush deferred add/remove queues, and apply keyboard controls.

    :param dt: time elapsed since the previous frame (seconds).
    """
    self.update_space()
    mi, ma = defs.num_elements_in_zone
    # Number of not-yet-activated elements currently in the zone.
    n = sum(int(not e.activated) for e in self.elements_in_zone)
    # Keep at least `mi` elements available ...
    if n < mi:
        self.drop_element()
    # ... and occasionally drop an extra one, up to the zone maximum.
    if random.random() < defs.drop_chance and n < ma:
        self.drop_element()
    for o in self.children:
        if isinstance(o, AnimObject):
            o.update(dt)
    # Flush objects scheduled for removal.  Removal is deferred so the
    # iteration over self.children above never mutates the list it walks.
    for o in self.oo_to.remove:
        self.remove_obj(o, just_schedule=False)
        assert o not in self.children
    self.oo_to.remove.clear()
    # Instantiate and attach objects scheduled for addition
    # (stored as (class, args, kwargs) triples).
    for ocl, oa, okw in self.oo_to.add:
        newo = ocl(*oa, **okw)
        self.add_widget(newo)
    self.oo_to.add[:] = []
    # Keyboard controls: up/down aim the cannon, left/right push the wizard.
    if 'up' in self.keys_pressed:
        self.cannon.aim += 3
    if 'down' in self.keys_pressed:
        self.cannon.aim -= 3
    dx = 0
    if 'left' in self.keys_pressed:
        dx -= 20
    if 'right' in self.keys_pressed:
        dx += 20
    if dx:
        self.wizard.body.apply_impulse((defs.wizard_touch_impulse_x * dx, 0))
    self.update_beam_pos(dt)
def update_beam_pos(self, dt):
    """Advance the left beam rightwards in discrete 10px jumps.

    The step rate is limited by wall-clock time rather than `dt`
    (which is unused), so the beam moves at roughly defs.beam_speed.
    """
    beam_dx = 10
    # NOTE(review): the factor of 60 presumably assumes a 60 FPS frame
    # budget when converting beam_speed into a per-step interval -- confirm.
    beam_move_dt = 60 * beam_dx / defs.beam_speed
    if (time.time() - self.left_beam_time) > beam_move_dt:
        px, py = self.left_beam.body.position
        self.left_beam.body.position = (px + beam_dx, py)
        self.left_beam_time = time.time()
def drop_element(self):
    """
    Drop an element from heaven (the top edge of the playfield),
    unless the drop blockade (skip_drop, set right after a shot)
    is currently active.
    """
    if self.skip_drop:
        return
    _w, h = self.size
    # Pick a random x coordinate inside the configured drop zone.
    x = random.randint(*defs.drop_zone)
    # Element.random may decline to produce one (returns a falsy value),
    # depending on what is already in the zone.
    element = Element.random(elizo=self.elements_in_zone)
    if not element:
        return
    # Spawn at the chosen x, at the very top of the field.
    element.center = (x, h)
    self.add_widget(element)
def reached_elname(self, elname):
    """Hook called when the crafting chain produces element `elname`;
    reaching the dragon wins the game."""
    if elname == "dragon":
        Logger.debug("reached DRAGON!!!!!")
        # Show the victory splash and schedule the game-over transition.
        wi = Success(center=self.center, size=(700, 400))
        self.add_widget(wi)
        self.game_is_over = True
        App.get_running_app().sm.schedule_gameover()
| {"/main.py": ["/alcangame.py", "/anim.py", "/defs.py", "/element.py", "/snd.py", "/utils.py"], "/other.py": ["/anim.py", "/defs.py"], "/utils.py": ["/defs.py"], "/cannon.py": ["/anim.py", "/defs.py"], "/anim.py": ["/defs.py", "/baloon.py"], "/wizard.py": ["/anim.py", "/defs.py"], "/element.py": ["/anim.py", "/bfs.py", "/defs.py", "/utils.py", "/snd.py"], "/bfs.py": ["/element.py"], "/baloon.py": ["/anim.py", "/defs.py"], "/alcangame.py": ["/anim.py", "/baloon.py", "/cannon.py", "/defs.py", "/element.py", "/ui.py", "/wizard.py", "/other.py", "/snd.py", "/utils.py"]} |
75,556 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/cem.py | import copy
import joblib
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from Agents import policyIteration
from Agents.Policy import approximator, policy
from collections import deque
from collections.abc import Iterable
class CEM(policyIteration.PolicyIteration):
    """Cross Entropy Method agent.

    Keeps a population of Gaussian-perturbed copies of the current policy;
    after every generation the elite fraction (ranked by episode reward) is
    averaged to produce the next set of policy weights.
    """
    displayName = 'CEM'
    newParameters = [policyIteration.PolicyIteration.Parameter('Sigma', 0.001, 1.0, 0.001, 0.5, True, True, "The standard deviation of additive noise"),
                     policyIteration.PolicyIteration.Parameter('Population Size', 0, 100, 10, 10, True, True, "The size of the sample population"),
                     policyIteration.PolicyIteration.Parameter('Elite Fraction', 0.001, 1.0, 0.001, 0.2, True, True, "The proportion of the elite to consider for policy improvement.")]
    parameters = policyIteration.PolicyIteration.parameters + newParameters

    def __init__(self, *args):
        """
        Constructor for Cross Entropy Method agent.

        The trailing len(newParameters) positional args are this class's
        hyperparameters (sigma, population size, elite fraction); the rest
        are forwarded to PolicyIteration.
        """
        paramLen = len(CEM.newParameters)
        super().__init__(*args[:-paramLen])
        self.sigma, self.pop_size, self.elite_frac = args[-paramLen:]
        # Convert the pop_size to an integer.
        self.pop_size = int(self.pop_size)
        # Number of weight sets considered elite.  Bug fix: floor at 1 --
        # with int(pop_size * elite_frac) == 0, the slice
        # rewards.argsort()[-0:] in update() would select the ENTIRE
        # population (not the single best), silently breaking elitism.
        self.elite = max(1, int(self.pop_size*self.elite_frac))
        '''
        Define the policy.
        '''
        # Create a deep learning approximator.
        approx = approximator.DeepApproximator(self.state_size, self.action_size, [16])
        # Create a categorical policy with a deep approximator for this agent.
        self._policy = policy.CategoricalPolicy(approx)
        # Weights of the policy (random Gaussian initialization, scaled by sigma).
        self._best_weights = self.sigma*np.random.randn(self._policy.count_params())
        self._sample_policies = self._create_sample_policies()
        self._policy.set_params(self._best_weights)

    def choose_action(self, state, p: policy.Policy = None):
        """
        Chooses an action given the state and, if given, a policy. The
        policy p parameter is optional. If p is None, then the current
        policy of the agent will be used. Otherwise, the given policy p is
        used.

        :param state: is the current state of the environment
        :param p: is the policy to use
        :type p: Agents.Policy.policy.Policy
        :return: the chosen action
        :rtype: int
        """
        if (p is not None and not isinstance(p, policy.Policy)):
            raise ValueError("p must be a valid policy.Policy object.")
        # Convert the state to a numpy array.
        state = np.asarray(state)
        # Choose an action with the current policy or, if given, with p.
        if (p is None):
            action = self._policy.choose_action(state)
        else:
            action = p.choose_action(state)
        return action

    def get_sample_policies(self):
        """
        Returns the current list of sample policies.

        :return: a list of the current sample policies
        :rtype: list
        """
        return self._sample_policies

    def update(self, trajectory: Iterable):
        """
        Updates the current policy given the trajectories of each sample
        policy for this generation.

        :param trajectory: one Iterable of transition frames per sample
            policy (must have exactly pop_size entries)
        :type trajectory: Iterable
        """
        if (not isinstance(trajectory, Iterable) or len(trajectory) != self.pop_size):
            raise ValueError("The length of the list of trajectories should be equal to the population size.")
        # Total episode reward for each sample policy's trajectory.
        rewards = np.array([sum(transition.reward for transition in policy_t) for policy_t in trajectory])
        # New best weights = mean of the elite (highest-reward) weight sets.
        elite_idxs = rewards.argsort()[-self.elite:]
        elite_weights = [self._sample_policies[i].get_params() for i in elite_idxs]
        self._best_weights = np.array(elite_weights).mean(axis=0)
        # Resample the population around the new best weights.
        self._sample_policies = self._create_sample_policies()
        self._policy.set_params(self._best_weights)

    def _create_sample_policies(self):
        """
        Creates a list of sample policies. The length of the list is equal
        to the population size of this agent.

        :return: a list of sample policies
        :rtype: list
        """
        policies = []
        for i in range(self.pop_size):
            # Deep-copy the current policy, then perturb its weights with
            # zero-mean Gaussian noise scaled by sigma.
            p = copy.deepcopy(self._policy)
            sample_weights = self._best_weights + (self.sigma * np.random.randn(self._policy.count_params()))
            p.set_params(sample_weights)
            policies.append(p)
        return policies

    def save(self, filename):
        """Persist the policy parameters (tagged with the agent name) to disk."""
        mem = self._policy.get_params()
        joblib.dump((CEM.displayName, mem), filename)

    def load(self, filename):
        """Load policy parameters from disk; ignores files saved by other agents."""
        name, mem = joblib.load(filename)
        if name != CEM.displayName:
            print('load failed')
        else:
            self._policy.set_params(mem)

    def memsave(self):
        """Return the policy parameters as an in-memory snapshot."""
        return self._policy.get_params()

    def memload(self, mem):
        """Restore policy parameters from an in-memory snapshot."""
        self._policy.set_params(mem)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,557 | RobertCordingly/easyRL-v0 | refs/heads/master | /webpage/easyRL_app/models.py | from django.db import models
# Create your models here.
class Document(models.Model):
    """A single user-uploaded file (used by the import/upload views)."""
    # Empty verbose_name suppresses the field label on the upload form.
    upload = models.FileField(verbose_name="")
75,558 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/valueIteration.py | from Agents import modelBasedAgent
class ValueIteration(modelBasedAgent.ModelBasedAgent):
    """Skeleton for a value-iteration agent; all behavior currently
    defers to the ModelBasedAgent base class."""

    def __init__(self):
        super().__init__()
        # State-value table; never populated by this stub.
        self.value = []

    def update(self):
        # Delegates entirely to ModelBasedAgent.update.
        super().update()

    def choose_action(self, state):
        # NOTE(review): the base-class result is discarded, so this method
        # always returns None -- confirm whether `return super()...` was intended.
        super().choose_action(state)

    def __deepcopy__(self, memodict={}):
        # Deep copying unsupported: copy.deepcopy(agent) yields None.
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,559 | RobertCordingly/easyRL-v0 | refs/heads/master | /webpage/easyRL_app/urls.py | from django.urls import path
from . import views as view
from easyRL_app import views
# URL routes for the easyRL web app.  Both aliases below refer to the same
# module: `view` is used for function-based views, `views` for the
# class-based ones (import_model, file_upload).
urlpatterns = [
    path('', view.index, name="index")
    ,path('login/', view.login, name='login')
    ,path('logout/', view.logout, name='logout')
    ,path('train/', view.train, name='train')
    ,path('test/', view.test, name='test')
    ,path('halt/', view.halt, name='halt')
    ,path('poll/', view.poll, name='poll')
    ,path('info/', view.info, name='info')
    ,path('export/', view.export_model, name="export_model")
    ,path('import/',views.import_model.as_view(), name="import_model")
    ,path('upload/',views.file_upload.as_view(), name="upload")
    ,path('imported/file/',view.import_model_lambda, name="import_model_lambda")
]
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,560 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/adrqn.py | import numpy as np
from Agents import drqn
from Agents.Collections import ExperienceReplay
from Agents.Collections.TransitionFrame import ActionTransitionFrame
class ADRQN(drqn.DRQN):
    """Action-specific Deep Recurrent Q-Network.

    Extends DRQN by feeding the previous action (one-hot encoded) alongside
    each observation into the recurrent Q-network, so the network is built
    with three inputs: state history, action history, and an action mask.
    """
    displayName = 'ADRQN'

    def __init__(self, *args):
        super().__init__(*args)
        empty_state = self.get_empty_state()
        # Replay buffer of ActionTransitionFrame(prev_action, state, action,
        # reward, next_state, done), sampled as fixed-length histories.
        self.memory = ExperienceReplay.ReplayBuffer(self, self.memory_size, ActionTransitionFrame(-1, empty_state, -1, 0, empty_state, False), history_length = self.historylength)

    def getRecentAction(self):
        """Return the most recent action history from the replay buffer."""
        return self.memory.get_recent_action()

    def choose_action(self, state):
        """Greedy action for `state`, conditioned on the recent state and
        action histories."""
        recent_state = self.getRecentState()
        # Slide the history window: drop the oldest frame, append `state`.
        recent_state = np.concatenate([recent_state[1:], [state]], 0)
        recentRawActions = self.getRecentAction()
        recent_action = [self.create_one_hot(self.action_size, action) for action in recentRawActions]
        qval = self.predict((recent_state, recent_action), False)
        action = np.argmax(qval)
        return action

    def addToMemory(self, state, action, reward, new_state, done):
        """Append a transition, tagging it with the previously stored action."""
        prev_action = self.memory.peak_frame().action
        self.memory.append_frame(ActionTransitionFrame(prev_action, state, action, reward, new_state, done))

    def predict(self, state, isTarget):
        """Run the target (isTarget=True) or online network on one
        (state_history, action_history) pair."""
        state, action = state
        stateShape = (1,) + (self.historylength,) + self.state_size
        actionShape = (1,) + (self.historylength,) + (self.action_size,)
        state = np.reshape(state, stateShape)
        action = np.reshape(action, actionShape)
        if isTarget:
            result = self.target.predict([state, action, self.allMask])
        else:
            result = self.model.predict([state, action, self.allMask])
        return result

    def buildQNetwork(self):
        """Build the 3-input (state history, action history, action mask)
        recurrent Q-network."""
        from tensorflow.python.keras.optimizer_v2.adam import Adam
        from tensorflow.keras.models import Model
        from tensorflow.keras.layers import Input, Dense, Conv2D
        from tensorflow.keras.layers import Flatten, TimeDistributed, LSTM, concatenate, multiply
        input_shape = (self.historylength,) + self.state_size
        inputA = Input(shape=input_shape)
        inputB = Input(shape=(self.historylength, self.action_size))
        inputC = Input(shape=(self.action_size,))
        if len(self.state_size) == 1:
            # Vector observations: small dense encoder per time step.
            x = TimeDistributed(Dense(24, activation='relu'))(inputA)
        else:
            # Image observations: conv stack per time step.
            x = TimeDistributed(Conv2D(16, 8, strides=4, activation='relu'))(inputA)
            x = TimeDistributed(Conv2D(32, 4, strides=2, activation='relu'))(x)
            x = TimeDistributed(Flatten())(x)
        x = Model(inputs=inputA, outputs=x)
        y = TimeDistributed(Dense(24, activation='relu'))(inputB)
        y = Model(inputs=inputB, outputs=y)
        combined = concatenate([x.output, y.output])
        z = LSTM(256)(combined)
        z = Dense(10, activation='relu')(z)  # fully connected
        z = Dense(10, activation='relu')(z)
        z = Dense(self.action_size)(z)
        # Mask the Q-values so only the selected action contributes to the loss.
        outputs = multiply([z, inputC])
        inputs = [inputA, inputB, inputC]
        model = Model(inputs=inputs, outputs=outputs)
        model.compile(loss='mse', optimizer=Adam(lr=0.0001, clipvalue=1))
        return model

    def calculateTargetValues(self, mini_batch):
        """Assemble (X_train, Y_train) for one mini-batch of transition
        histories, with targets computed from the target network."""
        X_train = [np.zeros((self.batch_size,) + (self.historylength,) + self.state_size),
                   np.zeros((self.batch_size,) + (self.historylength,) + (self.action_size,)),
                   np.zeros((self.batch_size,) + (self.action_size,))]
        next_states = [np.zeros((self.batch_size,) + (self.historylength,) + self.state_size),
                       np.zeros((self.batch_size,) + (self.historylength,) + (self.action_size,))]
        for index_rep, history in enumerate(mini_batch):
            for histInd, transition in enumerate(history):
                X_train[0][index_rep][histInd] = transition.state
                next_states[0][index_rep][histInd] = transition.next_state
                X_train[1][index_rep][histInd] = self.create_one_hot(self.action_size, transition.prev_action)
                next_states[1][index_rep][histInd] = self.create_one_hot(self.action_size, transition.action)
            # After the inner loop `transition` is the LAST frame of the
            # history: its action selects the masked output column.
            X_train[2][index_rep] = self.create_one_hot(self.action_size, transition.action)
        Y_train = np.zeros((self.batch_size,) + (self.action_size,))
        qnext = self.target.predict(next_states + [self.allBatchMask])
        qnext = np.amax(qnext, 1)
        for index_rep, history in enumerate(mini_batch):
            transition = history[-1]
            if transition.is_done:
                Y_train[index_rep][transition.action] = transition.reward
            else:
                Y_train[index_rep][transition.action] = transition.reward + qnext[index_rep] * self.gamma
        return X_train, Y_train

    def compute_loss(self, mini_batch, q_target: list = None):
        """
        Computes the loss of each sample in the mini_batch. The loss is
        calculated as the TD Error of the Q-Network. Will use the given
        list of q_target values if provided instead of calculating.

        :param mini_batch: is the mini batch to compute the loss of.
        :param q_target: is a list of q_target values to use in the
        calculation of the loss. This is optional. The q_target values
        will be calculated if q_target is not provided.
        :type q_target: list
        """
        # Get the states from the batch.
        states = [np.zeros((self.batch_size,) + (self.historylength,) + self.state_size),
                  np.zeros((self.batch_size,) + (self.historylength,) + (self.action_size,))]
        for batch_idx, history in enumerate(mini_batch):
            for hist_idx, transition in enumerate(history):
                states[0][batch_idx][hist_idx] = transition.state
                # NOTE(review): calculateTargetValues fills the current-state
                # action history from transition.prev_action, while this uses
                # transition.action -- confirm which is intended.
                states[1][batch_idx][hist_idx] = self.create_one_hot(self.action_size, transition.action)
        # Get the actions from the batch.
        actions = [history[-1].action for history in mini_batch]
        '''
        If the q_target is None then calculate the target q-value using the
        target QNetwork.
        '''
        if (q_target is None):
            next_states = [np.zeros((self.batch_size,) + (self.historylength,) + self.state_size),
                           np.zeros((self.batch_size,) + (self.historylength,) + (self.action_size,))]
            for batch_idx, history in enumerate(mini_batch):
                for hist_idx, transition in enumerate(history):
                    next_states[0][batch_idx][hist_idx] = transition.next_state
                    next_states[1][batch_idx][hist_idx] = self.create_one_hot(self.action_size, transition.action)
            rewards = [history[-1].reward for history in mini_batch]
            is_dones = np.array([history[-1].is_done for history in mini_batch]).astype(float)
            # Bug fix: the network takes THREE separate inputs, so the two
            # history arrays must be concatenated with the mask
            # (next_states + [mask]) exactly as in calculateTargetValues --
            # not nested inside another list, which Keras would reject.
            q_target = self.target.predict(next_states + [self.allBatchMask])
            q_target = rewards + (1 - is_dones) * self.gamma * np.amax(q_target, 1)
        # Get the current q-values from the QNetwork (same fix as above).
        q = self.model.predict(states + [self.allBatchMask])
        q = np.choose(actions, q.T)
        # Calculate and return the loss (TD Error).
        loss = (q_target - q) ** 2
        return loss

    def apply_hindsight(self):
        '''
        The hindsight replay buffer method checks for
        the instance; if a hindsight buffer is installed, fold its
        relabelled goals into memory.
        '''
        if (isinstance(self.memory, ExperienceReplay.HindsightReplayBuffer)):
            self.memory.apply_hindsight()
class ADRQNPrioritized(ADRQN):
    """ADRQN variant backed by a prioritized experience replay buffer."""
    displayName = 'ADRQN Prioritized'
    newParameters = [ADRQN.Parameter('Alpha', 0.00, 1.00, 0.001, 0.60, True, True, "The amount of prioritization that gets used.")]
    parameters = ADRQN.parameters + newParameters

    def __init__(self, *args):
        # Strip this subclass's trailing hyperparameters before forwarding
        # the remainder to ADRQN.
        paramLen = len(ADRQNPrioritized.newParameters)
        super().__init__(*args[:-paramLen])
        # Prioritization exponent (the single trailing argument).
        self.alpha = float(args[-paramLen])
        empty_state = self.get_empty_state()
        # Replace the plain replay buffer installed by ADRQN.__init__.
        self.memory = ExperienceReplay.PrioritizedReplayBuffer(self, self.memory_size, ActionTransitionFrame(-1, empty_state, -1, 0, empty_state, False),
            history_length = self.historylength, alpha = self.alpha)
class ADRQNHindsight(ADRQN):
    """ADRQN variant backed by a hindsight experience replay buffer."""
    displayName = 'ADRQN Hindsight'
    newParameters = []
    parameters = ADRQN.parameters + newParameters

    def __init__(self, *args):
        # No extra hyperparameters (newParameters is empty), so forward all
        # args unchanged; the unused paramLen computation was removed.
        super().__init__(*args)
        empty_state = self.get_empty_state()
        # Replace the plain replay buffer installed by ADRQN.__init__.
        self.memory = ExperienceReplay.HindsightReplayBuffer(self, self.memory_size, ActionTransitionFrame(-1, empty_state, -1, 0, empty_state, False),
            history_length = self.historylength)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,561 | RobertCordingly/easyRL-v0 | refs/heads/master | /MVC/controller.py | import tkinter
from MVC import view, model, terminalView
import threading
import queue
import sys
# pip install pillow
# pip install gym
# pip install pandas
# pip install numpy
# pip install tensorflow
# pip install opencv-python
# pip install gym[atari] (if not on Windows)
# OR if on Windows:
# {
# pip install --no-index -f https://github.com/Kojoley/atari-py/releases atari_py
# pip install git+https://github.com/Kojoley/atari-py.git
# }
# pip install ttkthemes
# pip install ttkwidgets
class Controller:
    """Top-level application controller: parses CLI arguments, owns one
    model.Model per UI tab, and starts either the GUI or terminal view."""

    def __init__(self, argv):
        # tabID -> model.Model
        self.models = {}
        self.viewListener = self.ViewListener(self)
        # Parsed "--flag value ..." pairs (flag name without the dashes).
        self.arguments = {}
        self.jobID = None
        self.secretKey = None
        self.accessKey = None
        self.sessionToken = None
        self.name = None
        flagName = ""
        for arg in argv:
            if "--" in arg:
                flagName = arg[2:]
                self.arguments[flagName] = ""
            elif flagName != "":
                # NOTE(review): consecutive values are concatenated with no
                # separator -- confirm multi-token flag values are intended.
                self.arguments[flagName] += arg
        print("ALL Arguments: " + str(self.arguments))
        # Process arguments
        if "jobID" in self.arguments:
            self.jobID = self.arguments["jobID"]
        if "secretKey" in self.arguments:
            self.secretKey = self.arguments["secretKey"]
        if "accessKey" in self.arguments:
            self.accessKey = self.arguments["accessKey"]
        if "sessionToken" in self.arguments:
            self.sessionToken = self.arguments["sessionToken"]
        # Start after parsing...
        if "--terminal" in argv:
            self.view = terminalView.View(self.viewListener)
        else:
            self.view = view.View(self.viewListener)

    class ViewListener:
        """Facade the views call into; lazily creates one model and one
        message queue per tab and runs training/testing on worker threads."""

        def __init__(self, controller):
            self.controller = controller
            # tabID -> queue.Queue used to stream progress back to the view.
            self.messageQueues = {}

        def getModel(self, tabID):
            """Return (creating on first use) the model for this tab."""
            # NOTE: local name `model` shadows the imported MVC.model module
            # below; kept as-is.
            curModel = self.controller.models.get(tabID)
            if not curModel:
                curModel = model.Model()
                # With cloud credentials present, attach a job bridge.
                if (self.controller.secretKey is not None and self.controller.accessKey is not None):
                    curModel.createBridge(self.controller.jobID, self.controller.secretKey, self.controller.accessKey, self.controller.sessionToken)
                self.controller.models[tabID] = curModel
            return curModel

        def getQueue(self, tabID):
            """Return (creating on first use) the message queue for this tab."""
            curQueue = self.messageQueues.get(tabID)
            if not curQueue:
                curQueue = queue.Queue()
                self.messageQueues[tabID] = curQueue
            return curQueue

        def setEnvironment(self, tabID, envClass):
            """Reset the tab's model and select a new environment class."""
            model = self.getModel(tabID)
            model.reset()
            model.environment_class = envClass
            print('loaded ' + envClass.displayName)

        def setAgent(self, tabID, agentClass):
            """Reset the tab's model and select a new agent class."""
            model = self.getModel(tabID)
            model.reset()
            model.agent_class = agentClass
            print('loaded ' + agentClass.displayName)

        def startTraining(self, tabID, args):
            """Kick off training on a daemon-less worker thread."""
            model = self.getModel(tabID)
            queue = self.getQueue(tabID)
            threading.Thread(target=model.run_learning, args=[queue,]+args).start()

        def startTesting(self, tabID, args):
            """Kick off testing; returns False when no trained agent (or
            saved file) is available to test."""
            model = self.getModel(tabID)
            queue = self.getQueue(tabID)
            if model.agent or model.loadFilename:
                threading.Thread(target=model.run_testing, args=[queue,]+args).start()
                return True
            else:
                return False

        def modelIsRunning(self, tabID):
            """True while the tab's model is training or testing."""
            model = self.getModel(tabID)
            return model.isRunning

        def halt(self, tabID):
            """Ask the tab's model to stop its current run."""
            model = self.getModel(tabID)
            model.halt_learning()

        def haltAll(self):
            """Stop every tab's model (used on application shutdown)."""
            for _, model in self.controller.models.items():
                model.halt_learning()

        def reset(self, tabID):
            model = self.getModel(tabID)
            model.reset()

        def close(self, tabID):
            """Halt and discard the model and queue belonging to a closed tab."""
            self.halt(tabID)
            if self.controller.models.get(tabID):
                del self.controller.models[tabID]
            if self.messageQueues.get(tabID):
                del self.messageQueues[tabID]

        def save(self, filename, tabID):
            model = self.getModel(tabID)
            model.save(filename)

        def load(self, filename, tabID):
            model = self.getModel(tabID)
            model.load(filename)
# Conventional way to write the main method
if __name__ == "__main__":
    # Bug fix: Controller.__init__ requires argv; calling Controller() with
    # no arguments raised TypeError on launch.  Pass the real command line
    # so flag parsing (and the "--terminal" check) work.
    Controller(sys.argv)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,562 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/qLearning.py | from Agents.Collections import qTable
import joblib
class QLearning(qTable.QTable):
    """Tabular Q-Learning agent built on the shared QTable base class."""
    displayName = 'Q Learning'

    def __init__(self, *args):
        super().__init__(*args)

    def remember(self, state, action, reward, new_state, done=False):
        """Perform one TD(0) backup for (state, action) and return the
        squared TD error.

        NOTE(review): the bootstrap value uses self.choose_action(new_state);
        if that policy is epsilon-greedy this is a SARSA-style backup rather
        than the max-action backup of classic Q-Learning -- confirm
        choose_action is greedy here.
        """
        prevQValue = self.getQvalue(state, action)
        newQValue = self.getQvalue(new_state, self.choose_action(new_state))
        if done:
            # Terminal transition: no bootstrapping.
            target = reward
        else:
            target = reward + self.gamma * newQValue
        loss = target - prevQValue
        # Move the stored Q-value a step of size alpha toward the target.
        self.qtable[(state, action)] = prevQValue + self.alpha * loss
        return loss**2

    def __deepcopy__(self, memodict={}):
        # Deep copying unsupported: copy.deepcopy(agent) yields None.
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,563 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/Policy/approximator.py | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from abc import ABC, abstractmethod
from collections.abc import Iterable
class Approximator(ABC):
    """
    Interface for a function approximator. A function approximator is a
    class that approximates the value of each action given a state.
    """
    def __init__(self, state_size: tuple, action_size: int):
        """
        Constructor for Approximator.

        :param state_size: is the shape and size of the state
        :type state_size: tuple
        :param action_size: is the size of the action space
        :type action_size: int
        :raises ValueError: if state_size is not a tuple or action_size is
            not a positive integer
        """
        if (not isinstance(state_size, tuple)):
            raise ValueError("state_size must be a tuple of positive integers.")
        if (not isinstance(action_size, int) or action_size < 1):
            raise ValueError("action_size must be a positive integer.")
        self.state_size = state_size
        self.action_size = action_size

    @abstractmethod
    def __call__(self, state: np.ndarray):
        """
        Approximates the value of each action in the action space given
        the state.

        :param state: the current state of the environment
        :type state: numpy.ndarray
        :return: the approximate values outputted by the approximator.
        """
        pass

    @abstractmethod
    def count_params(self) -> int:
        """
        Counts the number of parameters for this approximator.

        :return: the number of parameters for this approximator
        :rtype: int
        """
        pass

    @abstractmethod
    def get_params(self, flatten: bool = True):
        """
        Gets the parameters used by this approximator. The parameters
        for this model are the weights and bias of each layer. The
        parameters are returned as a one-dimensional numpy array if flatten
        is true (the default); otherwise the parameters are returned
        in the native format of the underlying model.

        :param flatten: whether to flatten the parameters to a
        one-dimensional array or not
        :type flatten: bool
        :return: the parameters used by this approximator
        """
        # (The docstring alone forms a valid method body.)

    @abstractmethod
    def set_params(self, params: np.ndarray):
        """
        Set the parameters of this model to the ones given in the
        parameters as a numpy array. The length of the array must equal
        the number of parameters used by this model. The parameters must
        be flattened into a one-dimensional numpy array.

        :param params: A numpy array of the parameters to set this
        approximator to.
        :type params: numpy.ndarray
        """
        pass

    @abstractmethod
    def update(self, states: np.ndarray, targets: np.ndarray):
        """
        Updates the approximator given a batch of states as the input and
        their corresponding target values.

        :param states: an array of multiple states that are the input to
        the approximator.
        :type states: numpy.ndarray
        :param targets: the target values the approximator should calculate.
        :type targets: numpy.ndarray
        :return: the loss from training.
        :rtype: float
        """
        pass

    @abstractmethod
    def zero_grad(self):
        """
        Zeros out the gradient of the model.
        """
        pass
class DeepApproximator(Approximator):
    """
    Artificial Neural Network implementation of a function approximator.
    Can use either keras or Pytorch to construct the sequential model
    (only the PyTorch backend is currently enabled — see note below).
    """
    # List of available machine learning libraries.
    libraries = ['torch']
    '''
    No longer allows the use of Keras as the library as it needs more
    work to be implemented by policy.py and consequently an agent that use
    Deep Approximator since __call__ will return different types of tensors.
    The methods that need to be tweaked in DeepApproximator to fully
    support keras are __call__ (it needs to output a TensorFlow tensor),
    update, and zero_grad.
    Comment back in when these are updated to support keras and policy.py
    and any agent using DeepApproximator can use either libraries since the
    tensors are different.
    '''
    #libraries = ['keras', 'torch']

    def __init__(self, state_size: tuple, action_size: int,
                 hidden_sizes: Iterable = (), library: str = 'torch'):
        """
        Constructor for DeepApproximator.

        :param state_size: is the shape and size of the state
        :type state_size: tuple
        :param action_size: is the size of the action space
        :type action_size: int
        :param hidden_sizes: An Iterable object containing the lengths of
            hidden layers to add to the architecture.  The default is an
            empty tuple (no hidden layers); an immutable default avoids
            the shared-mutable-default pitfall of the original ``[]``.
        :type hidden_sizes: Iterable
        :param library: backend to build the model with; must be one of
            DeepApproximator.libraries
        :type library: str
        :raises ValueError: if hidden_sizes is not iterable or library is
            not an enabled backend
        """
        if (not isinstance(hidden_sizes, Iterable)):
            raise ValueError("hidden_sizes must be an Iterable object.")
        if (library not in DeepApproximator.libraries):
            raise ValueError("{} is not a valid machine learning library. Use one from the following list: {}".format(library, DeepApproximator.libraries))
        # Call super constructor.
        super(DeepApproximator, self).__init__(state_size, action_size)
        self.hidden_sizes = hidden_sizes
        self.library = library
        # Construct the model of the approximator function.
        self._model = None
        self._optimizer = None
        if (self.library == 'keras'):
            self._model = self._build_keras_network()
        if (self.library == 'torch'):
            self._model = self._build_torch_network()
            self._optimizer = optim.Adam(self._model.parameters(), lr = 0.001)

    def __call__(self, state: np.ndarray):
        """
        Approximates the value of each action in the action space given
        the state.

        :param state: the current state of the environment
        :type state: numpy.ndarray
        :return: the approximate values (a flat numpy array for 'keras',
            a torch.Tensor for 'torch')
        :raises ValueError: if state is not an ndarray of shape state_size
        """
        if (not isinstance(state, np.ndarray) or state.shape != self.state_size):
            raise ValueError("state must be a numpy.ndarray with shape {}".format(self.state_size))
        # Initialize the values as an empty array.
        values = np.empty(0)
        # Approximate the values.
        if (self.library == 'keras'):
            # Add a batch axis, predict, then flatten the batch away.
            state = np.reshape(state, (1,) + self.state_size)
            values = self._model.predict(state)
            values = np.reshape(values, -1)
        elif (self.library == 'torch'):
            # Convert the state into a tensor; the model output stays a
            # torch.Tensor so gradients can flow through callers.
            state = torch.from_numpy(state).float()
            values = self._model(state)
        # Return the values.
        return values

    def count_params(self):
        """
        Counts the number of parameters for this approximator.

        :return: the number of parameters for this approximator
        :rtype: int
        """
        if (self.library == 'keras'):
            return self._model.count_params()
        elif (self.library == 'torch'):
            return sum(param.numel() for param in self._model.parameters())

    def get_params(self, flatten: bool = True):
        """
        Gets the parameters (the weights and bias of each layer) used by
        this approximator.  If flatten is True (the default) they are
        returned as a one-dimensional numpy array; otherwise they are
        returned in the backend's native format (for 'torch' this is the
        generator from ``Module.parameters()``).

        :param flatten: whether to flatten the parameters to a
            one-dimensional array or not
        :type flatten: bool
        :return: the parameters used by this approximator
        """
        # Get the parameters in the raw form.
        params = None
        if (self.library == 'keras'):
            params = self._model.get_weights()
        elif (self.library == 'torch'):
            params = self._model.parameters()
        # If flatten is true, flatten the parameters into one array.
        if (flatten):
            # Empty numpy array to append the parameters to.
            flat_params = np.empty(0)
            # Append each layer's parameters to the flat array.
            if (self.library == 'keras'):
                for layer_params in params:
                    flat_params = np.append(flat_params, layer_params)
            elif (self.library == 'torch'):
                for layer_params in params:
                    flat_params = np.append(flat_params, layer_params.detach().numpy())
            # Set the parameters to the flattened representation.
            params = flat_params
        # Return the parameters.
        return params

    def set_params(self, params: np.ndarray):
        """
        Set the parameters of this model to the ones given as a flat
        float64 numpy array whose length equals the number of parameters
        used by this model.

        :param params: A numpy array of the parameters to set this
            approximator to.
        :type params: numpy.ndarray
        :raises ValueError: if params is not a float64 ndarray of the
            right length
        """
        if (not isinstance(params, np.ndarray)):
            raise ValueError("params must be an numpy array.")
        # BUG FIX: the original test was ``params.dtype is np.float64``,
        # which is always False (a dtype instance is never identical to
        # the scalar type object) and, had it ever fired, would have
        # rejected the exact dtype the message demands.  Compare for
        # inequality instead so non-float64 arrays are rejected.
        if (params.dtype != np.float64):
            raise ValueError("params must have float64 as its dtype.")
        if (len(params) != self.count_params()):
            raise ValueError("params must have length equal to the number of parameter used by this approximator.")
        # Get the index triples locating each layer's weights and bias.
        layer_idxes = self._layer_idxes()
        # Set the weights and bias for each layer.
        if (self.library == 'keras'):
            # Skip the input layer; Dense layers carry the parameters.
            layers = self._model.layers[1:]
            for layer_idx, layer in zip(layer_idxes, layers):
                # Slice and reshape the weights and bias for this layer.
                w = np.reshape(params[layer_idx[0]: layer_idx[1]], layer.get_weights()[0].shape)
                b = np.reshape(params[layer_idx[1]: layer_idx[2]], layer.get_weights()[1].shape)
                # Install the weights and bias.
                layer.set_weights([w, b])
        elif (self.library == 'torch'):
            # Only Linear modules carry parameters in this architecture.
            layers = [l for l in self._model.modules() if isinstance(l, nn.Linear)]
            for layer_idx, layer in zip(layer_idxes, layers):
                # Slice and reshape the weights and bias for this layer.
                w = np.reshape(params[layer_idx[0]: layer_idx[1]], layer.weight.data.shape)
                b = np.reshape(params[layer_idx[1]: layer_idx[2]], layer.bias.data.shape)
                # Convert the weights and bias to tensors.
                w = torch.from_numpy(w)
                b = torch.from_numpy(b)
                # Copy into the live parameter tensors in place.
                layer.weight.data.copy_(w.view_as(layer.weight.data))
                layer.bias.data.copy_(b.view_as(layer.bias.data))

    def update(self, states: np.ndarray, targets: np.ndarray):
        """
        Updates the approximator given a batch of states as the input and
        their corresponding target values (torch backend only).

        :param states: an array of multiple states to evaluate
        :type states: numpy.ndarray
        :param targets: the target values the approximator should produce
        :type targets: numpy.ndarray
        :return: the loss from training.
        :rtype: float
        """
        if (not isinstance(states, np.ndarray) or states.shape[1:] != self.state_size):
            raise ValueError("states must be a numpy array with each state having the shape {}.".format(self.state_size))
        if (not isinstance(targets, np.ndarray) or targets.shape[1:] != (self.action_size,)):
            raise ValueError("targets must be a numpy array with each target having the shape ({},).".format(self.action_size))
        # Zero out the gradients.
        self._optimizer.zero_grad()
        # Approximate the values of each state using the model.
        # NOTE(review): the (N, 1) buffer assumes action_size == 1 (a
        # single scalar output per state) — confirm against callers.
        approx_values = torch.zeros(len(targets), 1)
        for idx, state in enumerate(states):
            # Convert the state to a tensor.
            state = torch.from_numpy(state).float()
            # Approximate the values of the state.
            approx_values[idx][0] = self._model(state)
        # Calculate the loss as the MSE.
        # NOTE(review): torch.from_numpy keeps targets' numpy dtype;
        # presumably callers pass float32 — verify, as MSELoss requires
        # matching dtypes.
        targets = torch.from_numpy(targets)
        loss_fn = nn.MSELoss()
        loss = loss_fn(approx_values, targets)
        # Backpropagate the loss and take an optimizer step.
        loss.backward()
        self._optimizer.step()
        # Return the loss.
        return loss.item()

    def zero_grad(self):
        """
        Zeros out the gradient of the model.
        """
        self._model.zero_grad()

    def _build_keras_network(self):
        """
        Constructs and returns a sequential model using Keras. The model
        architecture is an Artificial Neural Network that will flatten the
        input. Each hidden layer uses ReLU activation.

        :return: sequential model to use as the function approximator
        :rtype: tensorflow.keras.Sequential
        """
        # Import the necessary packages for keras lazily so the torch
        # backend does not require TensorFlow to be installed.
        from tensorflow.python.keras.optimizer_v2.adam import Adam
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Dense, Input, Flatten
        # Sequential model to build the architecture for.
        model = Sequential()
        # Input and flatten layers to accept and flatten the state.
        model.add(Input(shape = self.state_size))
        model.add(Flatten())
        # Add n hidden layers sequentially, n = len(hidden_sizes).
        for size in self.hidden_sizes:
            model.add(Dense(size, activation= 'relu'))
        # Create the output layer.
        model.add(Dense(self.action_size, activation= 'linear'))
        # Compile and return the model.
        model.compile(loss='mse', optimizer=Adam(lr=0.001))
        return model

    def _build_torch_network(self):
        """
        Constructs and returns a sequential model using PyTorch. The model
        architecture is an Artificial Neural Network that will flatten the
        input. Each hidden layer uses ReLU activation.

        :return: sequential model to use as the function approximator
        :rtype: torch.nn.Sequential
        """
        # Ordered list for the layers and other modules.
        layers = []
        # Flattening layer to flatten the input if necessary.
        layers.append(nn.Flatten(0, -1))
        if (len(self.hidden_sizes) == 0):
            # No hidden layers: connect input directly to output.
            layers.append(nn.Linear(np.prod(self.state_size), self.action_size))
        else:
            # Input layer connected to the first hidden layer.
            layers.append(nn.Linear(np.prod(self.state_size), self.hidden_sizes[0]))
            layers.append(nn.ReLU())
            # n-1 additional hidden layers connected sequentially.
            for i in range(len(self.hidden_sizes) - 1):
                layers.append(nn.Linear(self.hidden_sizes[i], self.hidden_sizes[i + 1]))
                layers.append(nn.ReLU())
            # Output layer fed by the last hidden layer.
            layers.append(nn.Linear(self.hidden_sizes[-1], self.action_size))
        # Compile and return the sequential model.
        return nn.Sequential(*layers)

    def _layer_idxes(self):
        """
        Calculates the index tuples that point to where the weights and
        bias are located in the flat parameter array for each layer. The
        i-th tuple is (weights_start, weights_end/bias_start, bias_end)
        for the i-th layer.

        :return: list of tuples that describe how to parse the weights and
            bias of each layer from the parameters.
        :rtype: list
        """
        # Empty list for storing tuples of the indices.
        idxes = []
        if (len(self.hidden_sizes) == 0):
            # Indices of weights and bias from input to output layer.
            offset = 0
            fc_length = np.prod(self.state_size) * self.action_size
            idxes.append((offset, offset + fc_length, offset + fc_length + self.action_size))
        else:
            # Indices of weights and bias from input to first hidden layer.
            offset = 0
            fc_length = np.prod(self.state_size) * self.hidden_sizes[0]
            idxes.append((offset, offset + fc_length, offset + fc_length + self.hidden_sizes[0]))
            # Indices from hidden layer i to hidden layer i+1.
            for i in range(len(self.hidden_sizes) - 1):
                offset = idxes[-1][2]
                fc_length = self.hidden_sizes[i] * self.hidden_sizes[i + 1]
                idxes.append((offset, offset + fc_length, offset + fc_length + self.hidden_sizes[i + 1]))
            # Indices from the last hidden layer to the output layer.
            offset = idxes[-1][2]
            fc_length = self.hidden_sizes[-1] * self.action_size
            idxes.append((offset, offset + fc_length, offset + fc_length + self.action_size))
        # Return the index tuples.
        return idxes
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,564 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/agent.py | from abc import ABC, abstractmethod
"""This is an abstract model-free agent class that allows a user to define
their own custom agent by extending this class as a class named 'CustomAgent'.
"""
class Agent(ABC):
    """Abstract model-free agent base class.

    Users define their own custom agent by extending this class as a
    class named 'CustomAgent' and implementing the abstract methods.
    """

    class Parameter():
        """Describes one tunable hyperparameter exposed by an agent.

        The GUI uses these descriptors to build sliders and text inputs.
        """
        def __init__(self, name, min, max, resolution, default, hasSlider, hasTextInput, toolTipText=""):
            self.name = name
            self.min = min
            self.max = max
            self.resolution = resolution
            self.default = default
            self.hasSlider = hasSlider
            self.hasTextInput = hasTextInput
            self.toolTipText = toolTipText

    # Hyperparameters every agent exposes; subclasses extend this list.
    parameters = [Parameter('Gamma', 0.00, 1.00, 0.001, 0.97, True, True, "The factor by which to discount future rewards")]

    def __init__(self, state_size, action_size, gamma):
        """The constructor method

        :param state_size: the shape of the environment state
        :type state_size: tuple
        :param action_size: the number of possible actions
        :type action_size: int
        :param gamma: the discount factor
        :type gamma: float
        """
        self.state_size = state_size
        self.action_size = action_size
        self.gamma = gamma
        # Number of actions chosen so far; incremented by choose_action.
        self.time_steps = 0

    def get_empty_state(self):
        """Return a placeholder "empty" state filled with -10000 sentinels.

        :return: A representation of an empty game state.
        :rtype: list
        """
        shape = self.state_size
        if len(shape) >= 2:
            # BUG FIX: the original built rows with ``[[-10000]] * shape[0]``,
            # which repeats *references* to a single shared inner list, so
            # mutating any cell would be visible across the whole row.
            # Construct each innermost cell independently instead; the
            # returned value is element-wise identical to before.
            return [[[-10000] for _ in range(shape[0])] for _ in range(shape[1])]
        return [-10000] * shape[0]

    @abstractmethod
    def choose_action(self, state):
        """Returns the action chosen by the agent's current policy given a state.

        Implementations should call ``super().choose_action(state)`` so the
        time-step counter stays accurate.

        :param state: the current state of the environment
        :type state: tuple
        :return: the action chosen by the agent
        :rtype: int
        """
        self.time_steps += 1

    @abstractmethod
    def save(self, filename):
        """Saves the agent's Q-function to a given file location.

        :param filename: the name of the file location to save the Q-function
        :type filename: str
        :return: None
        :rtype: None
        """
        pass

    @abstractmethod
    def load(self, filename):
        """Loads the agent's Q-function from a given file location.

        :param filename: the name of the file location from which to load
            the Q-function
        :type filename: str
        :return: None
        :rtype: None
        """
        pass

    @abstractmethod
    def memsave(self):
        """Returns a representation of the agent's Q-function.

        :return: a representation of the agent's Q-function
        """
        pass

    @abstractmethod
    def memload(self, mem):
        """Loads a passed Q-function.

        :param mem: the Q-function to be loaded
        :return: None
        :rtype: None
        """
        pass

    def __deepcopy__(self, memodict={}):
        # Deep copying agents is intentionally a no-op (returns None).
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,565 | RobertCordingly/easyRL-v0 | refs/heads/master | /Environments/acrobotEnv.py | from Environments import classicControlEnv
import gym
from PIL import Image, ImageDraw
from math import cos, sin, pi
import numpy as np
class AcrobotEnv(classicControlEnv.ClassicControlEnv):
    """Wrapper around Gym's Acrobot-v1 with a custom PIL-based renderer."""
    # Name shown in the UI for this environment.
    displayName = 'Acrobot'

    def __init__(self):
        # Create the underlying Gym environment and mirror its
        # action/observation sizes onto this wrapper.
        # NOTE(review): does not call super().__init__() — confirm the
        # base ClassicControlEnv constructor is safe to skip.
        self.env = gym.make('Acrobot-v1')
        self.action_size = self.env.action_space.n
        self.state_size = self.env.observation_space.shape

    def boundToScreen(self, x, y):
        # Map world coordinates in [-2.2, 2.2] (both axes) onto a
        # 500x500 pixel screen.
        bound = 2.2
        screen = 500
        return (x+bound)*screen/(2*bound), (y+bound)*screen/(2*bound)

    def rotateTrans(self, x, y, tx, ty, ang):
        # Rotate (x, y) by -ang around the origin, then translate the
        # result to (tx, ty).
        return tx + x * cos(-ang) + y * sin(-ang), ty - x * sin(-ang) + y * cos(-ang)

    def render(self):
        """Draw the current acrobot pose to a 500x500 PIL image.

        Re-implements gym's classic-control viewer: two rotated link
        rectangles plus a circular joint marker at each link origin,
        drawn with PIL instead of pyglet.  Returns None when the wrapped
        environment has no state yet (i.e. before the first reset).
        """
        if self.env.state is None: return None
        screen_width = 500
        screen_height = 500
        s = self.env.state
        # Joint positions derived from the two link angles, as in gym's
        # own Acrobot renderer (LINK_LENGTH_* come from the gym env).
        p1 = [-self.env.LINK_LENGTH_1 *
              cos(s[0]), self.env.LINK_LENGTH_1 * sin(s[0])]
        p2 = [p1[0] - self.env.LINK_LENGTH_2 * cos(s[0] + s[1]),
              p1[1] + self.env.LINK_LENGTH_2 * sin(s[0] + s[1])]
        # Link origins in (x, y) order; the [:, ::-1] swaps the
        # (row, col) pairs built above into drawing coordinates.
        xys = np.array([[0, 0], p1, p2])[:, ::-1]
        thetas = [s[0] - pi / 2, s[0] + s[1] - pi / 2]
        link_lengths = [self.env.LINK_LENGTH_1, self.env.LINK_LENGTH_2]
        image = Image.new('RGB', (screen_width, screen_height), 'white')
        draw = ImageDraw.Draw(image)
        # Horizontal target line at world height y = 1.
        draw.line([self.boundToScreen(-2.2, 1), self.boundToScreen(2.2, 1)], fill='black')
        for ((x, y), th, llen) in zip(xys, thetas, link_lengths):
            # Rectangle of length llen and half-width 0.1 in link-local
            # coordinates, rotated/translated into world space.
            l, r, t, b = 0, llen, .1, -.1
            x1, y1 = self.boundToScreen(*self.rotateTrans(l, b, x, y, th))
            x2, y2 = self.boundToScreen(*self.rotateTrans(l, t, x, y, th))
            x3, y3 = self.boundToScreen(*self.rotateTrans(r, t, x, y, th))
            x4, y4 = self.boundToScreen(*self.rotateTrans(r, b, x, y, th))
            draw.polygon([(x1, y1), (x2, y2), (x3, y3), (x4, y4)], fill=(0, 204, 204))
            # Joint marker: circle of world radius 0.1 at the link origin.
            x1,y1 = self.boundToScreen(x-0.1,y-0.1)
            x2,y2 = self.boundToScreen(x+0.1, y+0.1)
            draw.chord([x1, y1, x2, y2], 0, 360, fill=(204, 204, 0))
        # Flip vertically so world +y points up on screen.
        return image.transpose(method=Image.FLIP_TOP_BOTTOM)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,566 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/rainbow.py | import joblib
import numpy as np
import random
import math
from Agents import modelFreeAgent
from Agents.deepQ import DeepQ
from Agents.Collections import ExperienceReplay
from Agents.Collections.TransitionFrame import TransitionFrame
# References:
# https://flyyufelix.github.io/2017/10/24/distributional-bellman.html
# http://github.com/flyyufelix/C51-DDQN-Keras/blob/master/c51_ddqn.py
class Rainbow(DeepQ):
    """
    Categorical / distributional DQN agent ("Rainbow", C51 variant).

    Instead of a scalar Q-value, the network predicts a probability
    distribution over a fixed support of num_atoms return values per
    action; Q-values are the expectation over that support.

    References:
    https://flyyufelix.github.io/2017/10/24/distributional-bellman.html
    http://github.com/flyyufelix/C51-DDQN-Keras/blob/master/c51_ddqn.py
    """
    displayName = 'Rainbow'
    newParameters = [DeepQ.Parameter('Learning Rate', 0.00001, 100, 0.00001, 0.001, True, True, "The rate at which the parameters respond to environment observations")]
    parameters = DeepQ.parameters + newParameters

    def __init__(self, *args):
        """Build the online/target distributional networks and replay buffer.

        The trailing len(newParameters) args are this class's own
        parameters; everything before them is forwarded to DeepQ.
        """
        paramLen = len(Rainbow.newParameters)
        super().__init__(*args[:-paramLen])
        empty_state = self.get_empty_state()
        self.total_steps = 0
        self.model = self.buildQNetwork()
        self.target = self.buildQNetwork()
        # BUG FIX: the trailing arg carries the 'Learning Rate' parameter;
        # the original discarded it and hard-coded 0.001.
        self.lr = args[-1]
        self.memory = ExperienceReplay.ReplayBuffer(self, self.memory_size, TransitionFrame(empty_state, -1, 0, empty_state, False))
        # Parameters of the categorical (C51) return distribution:
        # num_atoms support points evenly spaced over [v_min, v_max].
        self.num_atoms = 51
        self.v_min = -10
        self.v_max = 10
        self.delta_z = (self.v_max - self.v_min) / float(self.num_atoms - 1)
        self.z = [self.v_min + i * self.delta_z for i in range(self.num_atoms)]
        self.sample_size = min(self.batch_size, self.memory_size)
        # Prioritization exponent applied to TD errors for the
        # prioritized replay buffer.
        self.p = 0.5
        self.allBatchMask = np.full((self.sample_size, self.num_atoms), 1)

    def sample(self):
        """Draw a mini-batch of transitions from the replay buffer."""
        return self.memory.sample(self.batch_size)

    def addToMemory(self, state, action, reward, new_state, done):
        """Append one transition to the replay buffer."""
        self.memory.append_frame(TransitionFrame(state, action, reward, new_state, done))

    def remember(self, state, action, reward, new_state, done=False):
        """Store a transition and, once warmed up, run one training step.

        :return: the training loss (0 until the buffer holds at least
            2 * batch_size transitions).
        :rtype: float
        """
        self.addToMemory(state, action, reward, new_state, done)
        loss = 0
        if len(self.memory) < 2 * self.batch_size:
            return loss
        batch_idx, mini_batch = self.sample()
        loss = self.agent_loss()
        if isinstance(self.memory, ExperienceReplay.PrioritizedReplayBuffer):
            errors = self.compute_loss(mini_batch)
            # BUG FIX: the original iterated ``for idx, error in
            # batch_idx, errors`` which unpacks the two sequences
            # themselves rather than pairing their elements; zip them.
            for idx, error in zip(batch_idx, errors):
                self.memory.update_error(idx, error)
        return loss

    def sample_trajectories(self):
        """Sample a batch and split it into parallel component arrays.

        :return: (states, actions, rewards, next_states, dones)
        """
        _, mini_batch = self.sample()
        allStates = np.zeros(((self.sample_size, ) + self.state_size))
        allNextStates = np.zeros(((self.sample_size, ) + self.state_size))
        allActions = []
        allRewards = []
        allDones = []
        for index, transition in enumerate(mini_batch):
            states, actions, rewards, next_states, dones = transition
            allStates[index, :] = states
            allActions.append(actions)
            allRewards.append(rewards)
            allNextStates[index, :] = next_states
            allDones.append(dones)
        return allStates, allActions, allRewards, allNextStates, allDones

    def choose_action(self, state):
        """Greedy action: argmax over expected values of the predicted
        per-action return distributions."""
        shape = (1,) + self.state_size
        state = np.reshape(state, shape)
        z = self.model.predict([state])
        z_concat = np.vstack(z)
        # Expectation of each action's distribution over the support z.
        q_value = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1)
        action = np.argmax(q_value)
        return action

    def agent_loss(self):
        """One categorical-projection training step (C51 / double DQN).

        Projects the Bellman-updated support onto the fixed atom grid,
        fits the online network against the projected distributions, and
        returns the mean training loss.

        :rtype: float
        """
        allStates, allActions, allRewards, allNextStates, allDones = self.sample_trajectories()
        # Per-action target distributions to fit against.
        probs = [np.zeros((self.sample_size, self.num_atoms)) for i in range(self.action_size)]
        # Double-DQN: online net selects the next action, target net
        # supplies its distribution.
        z = self.model.predict(allNextStates)
        z_ = self.target.predict(allNextStates)
        z_concat = np.vstack(z)
        q_value = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1)
        q_value = q_value.reshape((self.sample_size, self.action_size), order='F')
        best_actions = np.argmax(q_value, axis=1)
        for i in range(self.sample_size):
            if allDones[i]:
                # Terminal: project the bare reward onto the support.
                Tz = min(self.v_max, max(self.v_min, allRewards[i]))
                bj = (Tz - self.v_min) / self.delta_z
                m_l, m_u = math.floor(bj), math.ceil(bj)
                probs[allActions[i]][i][int(m_l)] += (m_u - bj)
                probs[allActions[i]][i][int(m_u)] += (bj - m_l)
            else:
                # Non-terminal: shift each atom by the Bellman update and
                # distribute its probability over the two nearest atoms.
                for j in range(self.num_atoms):
                    Tz = min(self.v_max, max(self.v_min, allRewards[i] + self.gamma * self.z[j]))
                    bj = (Tz - self.v_min) / self.delta_z
                    m_l, m_u = math.floor(bj), math.ceil(bj)
                    probs[allActions[i]][i][int(m_l)] += z_[best_actions[i]][i][j] * (m_u - bj)
                    probs[allActions[i]][i][int(m_u)] += z_[best_actions[i]][i][j] * (bj - m_l)
        # Fit the online network to the projected target distributions.
        loss = self.model.fit(allStates, probs, batch_size = self.sample_size, epochs = 1, verbose = 0)
        # BUG FIX: the original appended the loop *indices*
        # (range(len(history))) instead of the recorded loss values, so
        # the returned "loss" never reflected training.  Use the actual
        # per-epoch losses from the fit history.
        losses = loss.history['loss']
        return np.mean(np.array(losses))

    def updateTarget(self):
        """Copy online weights into the target network every
        target_update_interval steps (after warm-up)."""
        if self.total_steps >= 2 * self.batch_size and self.total_steps % self.target_update_interval == 0:
            self.target.set_weights(self.model.get_weights())
            print("target updated")
        self.total_steps += 1

    def buildQNetwork(self):
        """Build the distributional network: shared trunk with one
        softmax head of num_atoms (51) outputs per action.

        :rtype: tensorflow.keras.Model
        """
        from tensorflow.python.keras.optimizer_v2.adam import Adam
        from tensorflow.keras.models import Model
        from tensorflow.keras.layers import Dense, Input, Flatten, multiply
        inputs = Input(shape=self.state_size)
        h1 = Dense(64, activation='relu')(inputs)
        h2 = Dense(64, activation='relu')(h1)
        outputs = []
        for _ in range(self.action_size):
            outputs.append(Dense(51, activation='softmax')(h2))
        model = Model(inputs=inputs, outputs=outputs)
        # Canonical Rainbow optimizer settings (Hessel et al.):
        # lr = 0.0000625, epsilon = 1.5e-4.
        model.compile(loss='mse', optimizer=Adam(lr=0.0000625, epsilon=1.5 * 1e-4))
        return model

    def calculateTargetValues(self, mini_batch):
        """Build (X, Y) training pairs from a mini-batch using the
        expected value of the target network's distributions.

        :return: (X_train, Y_train) for a supervised fit.
        """
        X_train = [np.zeros((self.sample_size,) + self.state_size), np.zeros((self.sample_size,) + (self.action_size,))]
        next_states = np.zeros((self.sample_size,) + self.state_size)
        for index_rep, transition in enumerate(mini_batch):
            X_train[0][index_rep] = transition.state
            X_train[1][index_rep] = self.create_one_hot(self.action_size, transition.action)
            next_states[index_rep] = transition.next_state
        Y_train = np.zeros((self.sample_size,) + (self.action_size,))
        # Expected next-state value from the target distribution.
        z = self.target.predict(next_states)
        z_concat = np.vstack(z)
        qnext = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1)
        for index_rep, transition in enumerate(mini_batch):
            if transition.is_done:
                Y_train[index_rep][transition.action] = transition.reward
            else:
                Y_train[index_rep][transition.action] = transition.reward + qnext[index_rep] * self.gamma
        return X_train, Y_train

    def compute_loss(self, mini_batch):
        """Compute per-transition TD errors (replay priorities).

        NOTE(review): assumes the prioritized buffer yields
        (timestep, transition) pairs — confirm against
        ExperienceReplay.PrioritizedReplayBuffer.

        :return: list of TD errors, one per sampled transition.
        :rtype: list
        """
        errors = []
        for time, sample in mini_batch:
            # Reshape the state and next_state to single-item batches.
            state, next_state = sample.state, sample.next_state
            next_reward = self.memory.get_transitions(time + 1).reward
            shape = (1,) + self.state_size
            state = np.reshape(state, shape)
            next_state = np.reshape(next_state, shape)
            # Q values needed for the TD error.
            q = self.target.predict([state])
            q_next = self.model.predict([next_state])
            q_max = np.argmax(q_next)
            # TD error raised to the prioritization exponent.
            error = (next_reward + self.gamma * q_max - q) ** self.p
            # BUG FIX: the original assigned ``errors[index] = error``
            # into an empty list, which raises IndexError on the first
            # iteration; append instead.
            errors.append(error)
        return errors

    def save(self, filename):
        """Persist the online network weights, tagged with the agent name."""
        mem = self.model.get_weights()
        joblib.dump((Rainbow.displayName, mem), filename)

    def load(self, filename):
        """Load weights saved by save(); rejects files from other agents."""
        name, mem = joblib.load(filename)
        if name != Rainbow.displayName:
            print('load failed')
        else:
            self.model.set_weights(mem)
            self.target.set_weights(mem)

    def memsave(self):
        """Return an in-memory copy of the online network weights."""
        return self.model.get_weights()

    def memload(self, mem):
        """Install weights into both the online and target networks."""
        self.model.set_weights(mem)
        self.target.set_weights(mem)

    def predict(self, state, isTarget):
        # Not used by this agent; required by the DeepQ interface.
        pass

    def reset(self):
        # No per-episode state to reset.
        pass

    def __deepcopy__(self, memodict={}):
        # Deep copying is intentionally a no-op (returns None).
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,567 | RobertCordingly/easyRL-v0 | refs/heads/master | /Custom Agents/DeepSARSA.py | import numpy as np
from Agents.deepQ import DeepQ
from Agents.episodicExperienceBuffer import EpisodicExperienceBuffer
class CustomAgent(DeepQ):
    """Deep SARSA: a DeepQ variant whose target uses the Q-value of the
    action *actually taken* in the next state (on-policy), sampled as
    consecutive transition pairs from an episodic buffer."""
    displayName = 'Deep SARSA'

    def __init__(self, *args):
        super().__init__(*args)
        # Episodic buffer holding up to 5 episodes / 655360 transitions.
        # NOTE(review): np.array(self.state_size) builds an array *of the
        # shape tuple itself*, not a zero state of that shape — confirm
        # this is the intended blank-transition template for
        # EpisodicExperienceBuffer.
        self.memory = EpisodicExperienceBuffer(5, 655360, (np.array(self.state_size), 0, 0, None, False))

    def addToMemory(self, state, action, reward, new_state, done):
        # Episode boundaries are truncated on terminal transitions so
        # sampled pairs never straddle two episodes.
        self.memory.add_transition(state, action, reward, new_state, done, truncate_episode=done)

    def sample(self):
        # Sample batch_size windows of 2 consecutive transitions
        # (s_t and s_{t+1}) from within single episodes — SARSA needs
        # the next action as well as the next state.
        return self.memory.sample_randomly_in_episode(self.batch_size, 2)

    def calculateTargetValues(self, mini_batch):
        """Build (X, Y) training pairs for the SARSA update.

        Each mini-batch element is a length-2 window: index [0] is the
        transition being trained on, index [1] supplies the *next*
        action for the on-policy target
        r + gamma * Q_target(s', a') (or just r on terminal steps).
        """
        states, actions, rewards, _, dones = mini_batch
        X_train = [np.zeros((self.batch_size,) + self.state_size), np.zeros((self.batch_size,) + (self.action_size,))]
        next_states = np.zeros((self.batch_size,) + self.state_size)
        for sample_index in range(self.batch_size):
            X_train[0][sample_index] = states[sample_index][0]
            # One-hot mask selecting the action taken at step t.
            X_train[1][sample_index] = self.create_one_hot(self.action_size, actions[sample_index][0])
            # The second state in the window is s_{t+1}.
            next_states[sample_index] = states[sample_index][1]
        # Y_train = self.model.predict(X_train)
        # qnext = self.target.predict(next_states)
        Y_train = np.zeros((self.batch_size,) + (self.action_size,))
        # allBatchMask enables all action outputs of the target network
        # (inherited from DeepQ — confirm shape against the base class).
        qnext = self.target.predict([next_states, self.allBatchMask])
        for sample_index in range(self.batch_size):
            if dones[sample_index][0]:
                # Terminal: target is the raw reward.
                Y_train[sample_index][actions[sample_index][0]] = rewards[sample_index][0]
            else:
                # SARSA target: reward + gamma * Q(s_{t+1}, a_{t+1}),
                # using the action actually taken at t+1.
                Y_train[sample_index][actions[sample_index][0]] = rewards[sample_index][0] + qnext[sample_index][actions[sample_index][1]] * self.gamma
        return X_train, Y_train
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,568 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/drqnConvNative.py | from Agents import modelFreeAgent
import numpy as np
from collections import deque
import random
import joblib
import cffi
import os
import pathlib
import platform
import importlib
class DRQNConvNative(modelFreeAgent.ModelFreeAgent):
    """Convolutional DRQN agent backed by a natively-compiled library
    accessed through cffi.

    The Python side holds only hyperparameters and an opaque handle
    (``self.nativeDRQNConv``) to the native agent object; all model,
    memory, and training state lives on the native side.
    """
    displayName = 'Conv DRQN Native'
    # Parameter(name, min, max, step, default, ..., description); see ModelFreeAgent.Parameter.
    newParameters = [modelFreeAgent.ModelFreeAgent.Parameter('Batch Size', 1, 256, 1, 32, True, True, "The number of transitions to consider simultaneously when updating the agent"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Memory Size', 1, 655360, 1, 1000, True, True, "The maximum number of timestep transitions to keep stored"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Target Update Interval', 1, 100000, 1, 200, True, True, "The distance in timesteps between target model updates"),
                     modelFreeAgent.ModelFreeAgent.Parameter('History Length', 0, 20, 1, 10, True, True, "The number of recent timesteps to use as input"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Learning Rate', 0.00001, 10, 0.00001, 0.001, True, True, "The rate at which the agent's weights are updated")]
    parameters = modelFreeAgent.ModelFreeAgent.parameters + newParameters

    def __init__(self, *args):
        """Split off this class's hyperparameters, load (compiling on first
        use) the native extension module, and create the native agent.

        :param args: the parent's parameters followed by this class's
            newParameters, in declaration order
        :type args: tuple
        """
        paramLen = len(DRQNConvNative.newParameters)
        super().__init__(*args[:-paramLen])
        # First pass converts the integer hyperparameters; the trailing
        # learning-rate slot is discarded here...
        self.batch_size, self.memory_size, self.target_update_interval, self.historyLength, _ = [int(arg) for arg in args[-paramLen:]]
        # ...and picked up here at full float precision.
        _, _, _, _, self.learningRate = [arg for arg in args[-paramLen:]]
        # Work from the native module's directory so cffi's build/load
        # resolves its relative paths; the original cwd is restored below.
        oldwd = pathlib.Path().absolute()
        curDir = oldwd / "Agents/Native/drqnConvNative"
        os.chdir(curDir.as_posix())
        self.ffi = cffi.FFI()
        if platform.system() == "Windows":
            # Windows builds are placed in a Release/ subpackage.
            if not importlib.util.find_spec("Agents.Native.drqnConvNative.Release._drqnConvNative"):
                self.compileLib(curDir)
            import Agents.Native.drqnConvNative.Release._drqnConvNative as _drqnConvNative
        else:
            if not importlib.util.find_spec("Agents.Native.drqnConvNative._drqnConvNative"):
                self.compileLib(curDir)
            import Agents.Native.drqnConvNative._drqnConvNative as _drqnConvNative
        self.nativeInterface = _drqnConvNative.lib
        # NOTE(review): state_size is indexed as if it were
        # (height, width, channels) and forwarded channels-first — confirm
        # this ordering against the createAgentc declaration in the C header.
        self.nativeDRQNConv = self.nativeInterface.createAgentc(self.state_size[2], self.state_size[0],
                                                                self.state_size[1], self.action_size,
                                                                self.gamma,
                                                                self.batch_size, self.memory_size,
                                                                self.target_update_interval, self.historyLength, self.learningRate)
        # self.nativeDRQNConv = self.nativeInterface.createAgentc(self.state_size[2], self.state_size[0], self.state_size[1], self.action_size,
        #                                          self.gamma,
        #                                          self.batch_size, self.memory_size,
        #                                          self.target_update_interval, self.historyLength)
        # Tracks whether the last choose_action() has been followed by a
        # remember() call; see choose_action for how this is used.
        self.chooseActionFlag = False
        os.chdir(oldwd.as_posix())

    def compileLib(self, curDir):
        """Build the cffi extension module wrapping the native library.

        :param curDir: directory containing drqnConvNative.h and the
            prebuilt native library to link against
        """
        headerName = curDir / "drqnConvNative.h"
        # Windows keeps build artifacts under Release/; elsewhere they sit
        # next to the sources.
        outputDir = (curDir / "Release") if platform.system() == "Windows" else curDir
        with open(headerName) as headerFile:
            self.ffi.cdef(headerFile.read())
        self.ffi.set_source(
            "_drqnConvNative",
            """
            #include "drqnConvNative.h"
            """,
            libraries=["drqnConvNative"],
            library_dirs=[outputDir.as_posix()],
            include_dirs=[curDir.as_posix()]
        )
        self.ffi.compile(verbose=True, tmpdir=outputDir)

    def __del__(self):
        """Release the native agent object."""
        self.nativeInterface.freeAgentc(self.nativeDRQNConv)

    def choose_action(self, state):
        """Ask the native agent for an action for ``state``.

        NOTE(review): when two choose_action calls happen without an
        intervening remember(), the current state is pushed to the native
        side with zeroed action/reward/done — presumably to keep the
        recurrent history contiguous; confirm against rememberc's semantics.

        :param state: the current observation (flattened before crossing the FFI boundary)
        :return: the chosen action index
        """
        cState = self.ffi.new("float[]", state.flatten().tolist())
        action = self.nativeInterface.chooseActionc(self.nativeDRQNConv, cState)
        if self.chooseActionFlag:
            self.nativeInterface.rememberc(self.nativeDRQNConv, cState, 0, 0, 0, 0)
        self.chooseActionFlag = True
        return action

    def remember(self, state, action, reward, new_state, done=False):
        """Record a transition on the native side and return the reported loss.

        :param state: the state in which ``action`` was taken
        :param action: the action taken
        :param reward: the observed reward
        :param new_state: unused here; the native side tracks successors itself
        :param done: whether the episode ended with this transition
        :return: the loss value returned by the native rememberc call
        """
        self.chooseActionFlag = False
        cState = self.ffi.new("float[]", state.flatten().tolist())
        #cNewState = self.ffi.new("float[]", new_state)
        # The native API takes done as an int flag.
        done = 1 if done else 0
        loss = self.nativeInterface.rememberc(self.nativeDRQNConv, cState, action, reward, done, 1)
        return loss

    def update(self):
        """No-op: learning happens on the native side (rememberc returns a loss)."""
        pass

    def reset(self):
        """No-op: resetting the native agent is not implemented here."""
        pass

    def __deepcopy__(self, memodict={}):
        """Deep copy is unsupported for the opaque native handle; returns None."""
        pass

    def save(self, filename):
        """Persist the native agent to ``filename`` (ASCII path expected)."""
        cFilename = self.ffi.new("char[]", filename.encode('ascii'))
        self.nativeInterface.savec(self.nativeDRQNConv, cFilename)

    def load(self, filename):
        """Restore the native agent from ``filename`` (ASCII path expected)."""
        cFilename = self.ffi.new("char[]", filename.encode('ascii'))
        self.nativeInterface.loadc(self.nativeDRQNConv, cFilename)

    def memsave(self):
        """Return an opaque in-memory snapshot of the native agent."""
        return self.nativeInterface.memsavec(self.nativeDRQNConv)

    def memload(self, mem):
        """Restore a snapshot previously produced by memsave()."""
        self.nativeInterface.memloadc(self.nativeDRQNConv, mem)
75,569 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/modelFreeAgent.py | from abc import ABC, abstractmethod
from Agents import agent
"""This is an abstract model-free agent class that allows a user to define
their own custom agent by extending this class as a class named 'CustomAgent'.
"""
class ModelFreeAgent(agent.Agent, ABC):
    """Abstract base class for model-free agents.

    Users can plug in their own agent by extending this class as a class
    named 'CustomAgent'.
    """
    displayName = 'Model Free Agent'
    newParameters = [agent.Agent.Parameter('Min Epsilon', 0.00, 1.00, 0.01, 0.1, True, True, "The minimum value of epsilon during training; the minimum probability that the model will select a random action over its desired one"),
                     agent.Agent.Parameter('Max Epsilon', 0.00, 1.00, 0.01, 1.0, True, True, "The maximum value of epsilon during training; the maximum/starting probability that the model will select a random action over its desired one"),
                     agent.Agent.Parameter('Decay Rate', 0.00, 0.20, 0.001, 0.018, True, True, "The amount to decrease epsilon by each timestep")]
    parameters = agent.Agent.parameters + newParameters

    def __init__(self, *args):
        """Split this class's trailing parameters off and forward the rest
        to the parent constructor.

        :param args: the parameters associated with the agent
        :type args: tuple
        """
        paramLen = len(ModelFreeAgent.newParameters)
        own_args = args[-paramLen:]
        super().__init__(*args[:-paramLen])
        self.min_epsilon = own_args[0]
        self.max_epsilon = own_args[1]
        self.decay_rate = own_args[2]

    def apply_hindsight(self):
        """Hook for hindsight experience replay; no-op by default."""
        pass

    @abstractmethod
    def remember(self, state, action, reward, new_state, done):
        """Record one transition observed during an episode.

        :param state: the original state of the environment
        :param action: the action the agent took in the environment
        :param reward: the reward the agent observed given its action
        :type reward: number
        :param new_state: the state the agent reached after taking the action
        :param done: whether the episode finished after taking the action
        :type done: bool
        :return: the MSE loss for the predicted q-values
        :rtype: number
        """
        pass

    @abstractmethod
    def reset(self):
        """Discard all training results, restoring the agent's initial state.

        :return: None
        :rtype: None
        """
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,570 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/Collections/episodicExperienceBuffer.py | import random
class EpisodicExperienceBuffer:
    """A replay buffer that stores transitions grouped by episode.

    Internal storage: ``self.episodes`` is a list of episodes; each episode
    is a tuple of ``in_size`` parallel lists, one list per element of the
    transition tuple (element i of every transition lives in list i).
    Hence the frequent ``episode[0]`` — the first list's length equals the
    episode's transition count.
    """

    def __init__(self, stored_tuple_length: int, max_length: int, default_tuple: tuple):
        """
        :param stored_tuple_length: number of elements in each transition tuple
        :param max_length: maximum total number of transitions kept across all episodes
        :param default_tuple: padding values (one per tuple element) used when
            a sampled history window extends before the start of an episode
        """
        assert len(default_tuple) == stored_tuple_length, "Default tuple should be of size " + str(stored_tuple_length)
        self.in_size = stored_tuple_length
        self.max_transitions = max_length
        self.default_tuple = default_tuple
        self.size = 0
        self.episodes = [self.__create_empty_lists_tuple(self.in_size)]

    def add_transition(self, *transition, truncate_episode=False):
        """Append one transition (passed as positional args) to the current episode.

        Pass ``truncate_episode=True`` to end the current episode with this
        transition.
        """
        assert len(transition) == self.in_size, "Transition added should be of size " + str(self.in_size)
        self.__mass_append(self.episodes[-1], *transition)
        self.size += 1
        if truncate_episode:
            self.truncate_episode()
        self.__check_size()

    def sample_randomly_in_episode(self, batch_size: int, history_length: int) -> tuple:
        """Sample ``batch_size`` history windows from one randomly chosen episode.

        Prefers an episode with at least ``batch_size`` transitions (indexes
        are then drawn without replacement); otherwise falls back to any
        non-empty episode with replacement.  Each sample is the
        ``history_length`` transitions ending at a chosen index, front-padded
        with ``default_tuple`` values where the window precedes the episode.

        :return: a tuple of ``in_size`` lists, each batch_size x history_length;
            batches match across the lists (assign as e.g.
            ``obs, actions, rewards = buf.sample_randomly_in_episode(b, h)``)
        """
        assert len(self.episodes) > 1 or len(self.episodes[0][0]) > 0, "Cannot sample from an empty buffer"
        substantial_episodes = [episode for episode in self.episodes if len(episode[0]) >= batch_size]
        if len(substantial_episodes) < 1:
            # No episode is long enough: sample indexes with replacement
            # from any non-empty episode instead.
            substantial_episodes = [episode for episode in self.episodes if len(episode[0]) > 0]
            episode_to_sample = random.choice(substantial_episodes)
            indexes = random.choices(range(len(episode_to_sample[0])), k=batch_size)
        else:
            episode_to_sample = random.choice(substantial_episodes)
            indexes = random.sample(range(len(episode_to_sample[0])), batch_size)
        return self.__get_batch(episode_to_sample, indexes, history_length)

    def __get_batch(self, episode: tuple, indexes_in_episode: list, history_length: int) -> tuple:
        """Build a history window ending at each index; slots before the
        episode start are filled from ``default_tuple``."""
        output = self.__create_empty_lists_tuple(self.in_size)
        for tuple_list_index in range(len(output)):
            current_batch = output[tuple_list_index]
            for start_index in indexes_in_episode:
                current_sample = []
                for index in range(start_index - history_length + 1, start_index + 1):
                    if index < 0:
                        current_sample.append(self.default_tuple[tuple_list_index])
                    else:
                        current_sample.append(episode[tuple_list_index][index])
                current_batch.append(current_sample)
        return output

    def get_recent_transition(self, history_length: int) -> tuple:
        """Return the most recent ``history_length`` transitions as a tuple of
        ``in_size`` lists, front-padded if the current episode is shorter."""
        recent_episode = self.episodes[-1]
        episode_length = len(recent_episode[0])
        # Fetch as a batch of one, then drop the batch dimension of length 1.
        return tuple([tuple_list[0] for tuple_list in self.__get_batch(recent_episode, [episode_length - 1], history_length)])

    def __check_size(self):
        """Evict whole episodes, oldest first, until within capacity.

        BUGFIX: the original subtracted the evicted episode's first *list*
        from ``self.size`` (a TypeError the first time capacity was
        exceeded); we subtract its length.  The current (last) episode is
        never evicted so ``add_transition`` always has a target episode.
        """
        while self.size > self.max_transitions and len(self.episodes) > 1:
            self.size -= len(self.episodes[0][0])
            del self.episodes[0]

    def truncate_episode(self):
        """Mark the end of the current episode; later transitions start a new one."""
        self.episodes.append(self.__create_empty_lists_tuple(self.in_size))

    @staticmethod
    def __create_empty_lists_tuple(tuple_length: int) -> tuple:
        """Return a tuple of ``tuple_length`` fresh empty lists."""
        return tuple([] for _ in range(tuple_length))

    @staticmethod
    def __mass_append(lists: tuple, *args):
        """Append args[i] onto lists[i] for every position present in both."""
        for index in range(min(len(lists), len(args))):
            lists[index].append(args[index])

    def __len__(self):
        """Total number of stored transitions across all episodes."""
        return self.size
# TESTING:
# a = EpisodicExperienceBuffer(3, 40, (0, 0, 0))
# a.add_transition(1, 1, 1)
# a.add_transition(5, 5, 5, truncate_episode=True)
# a.add_transition(3, 3, 3)
# a.add_transition(4, 4, 4)
# a.add_transition(5, 5, 5, truncate_episode=True)
# a.add_transition(1, 1, 1)
# a.add_transition(2, 2, 2)
# a.add_transition(3, 3, 3)
# a.add_transition(4, 4, 4)
# a.add_transition(5, 5, 5, truncate_episode=True)
# a.add_transition(4, 4, 4)
# a.add_transition(4, 4, 4)
# a.add_transition(5, 5, 5)
# print(a.sample_randomly_in_episode(6, 3))
# print(a.sample_randomly_in_episode(3, 3))
# print(a.get_recent_transition(5))
# print(a.get_recent_transition(4))
# print(a.get_recent_transition(3))
# print(a.get_recent_transition(2))
# print(a.get_recent_transition(1))
# print(a.get_recent_transition(0)) | {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,571 | RobertCordingly/easyRL-v0 | refs/heads/master | /Custom Agents/randomAgent.py | from Agents import modelFreeAgent
import random
class CustomAgent(modelFreeAgent.ModelFreeAgent):
    """A baseline agent that ignores its observations and acts uniformly at random."""
    displayName = 'Random Agent'

    def choose_action(self, state):
        """Pick a uniformly random action; also advances the timestep counter."""
        chosen = random.randrange(self.action_size)
        self.time_steps = self.time_steps + 1
        return chosen

    def remember(self, state, action, reward, new_state, done):
        """Nothing to learn — transitions are discarded."""
        pass

    def reset(self):
        """No learned state to reset."""
        pass

    def save(self, filename):
        """Nothing to persist."""
        pass

    def load(self, filename):
        """Nothing to restore."""
        pass

    def memsave(self):
        """No in-memory state to snapshot."""
        pass

    def memload(self, mem):
        """No in-memory state to restore."""
        pass

    def __deepcopy__(self, memodict={}):
        """Deep copy is not supported; returns None."""
        pass
75,572 | RobertCordingly/easyRL-v0 | refs/heads/master | /Custom Environments/SALEEnv.py | from abc import ABC
import random
from enum import Enum
from typing import List
from PIL import Image, ImageDraw
import gym
from Environments import environment
# Probability that a seller's product turns out good (deterministic here: 1 or 0).
GOOD_SELLER_GOOD_SELL_CHANCE = 1
BAD_SELLER_GOOD_SELL_CHANCE = 0
# Probability that an adviser answers truthfully (deterministic here: 1 or 0).
GOOD_ADVISER_HONEST_CHANCE = 1
BAD_ADVISER_HONEST_CHANCE = 0
# Terminal rewards for buy / do-not-buy outcomes.
SATISFACTORY_OUTCOME_REWARD = 100
UNSATISFACTORY_OUTCOME_REWARD = -100
# Per-step costs (negative rewards) of the information-gathering actions.
ADVISER_QUERY_REWARD = -1
SELLER_QUERY_REWARD = -10
class SaleEnv(gym.Env):
    """The SALE POMDP: the agent may query advisers about sellers or about
    other advisers, buy from a seller, or decline to buy, all under partial
    observability of which actors are good/trustworthy.

    The flat action space is laid out as:
    [seller queries][adviser queries][buy actions][do-not-buy].
    """
    displayName = 'SALE-POMDP'
    metadata = {'render.modes': []}

    def __init__(self, tie_settler_func=None, actors: int = 5, seller_prop: float = 0.2, good_sell_prop: float = 0.5, good_adviser_prop: float = 0.8, *penalty_functions):
        """
        :param tie_settler_func: function (float, float) -> (int, int) used to
            split fractional actor counts into integers; defaults to
            round_with_minimum_one_tend_first
        :param actors: total number of actors (sellers + advisers)
        :param seller_prop: fraction of actors that are sellers
        :param good_sell_prop: fraction of sellers that are good
        :param good_adviser_prop: fraction of advisers that are trustworthy
        :param penalty_functions: extra callables mapping an action to an
            additional reward (usually a penalty) added on every step
        """
        if tie_settler_func is None:
            tie_settler_func = round_with_minimum_one_tend_first
        self.ACTORS = actors
        self.SELLERS, self.ADVISERS = tie_settler_func(actors * seller_prop, actors * (1 - seller_prop))
        self.GOOD_SELLERS, self.BAD_SELLERS = tie_settler_func(self.SELLERS * good_sell_prop, self.SELLERS * (1 - good_sell_prop))
        self.GOOD_ADVISERS, self.BAD_ADVISERS = tie_settler_func(self.ADVISERS * good_adviser_prop, self.ADVISERS * (1 - good_adviser_prop))
        # One "ask adviser j about seller i" action per (seller, adviser) pair,
        # one "ask adviser j about adviser i" per adviser pair, one buy action
        # per seller, and a single do-not-buy action.
        self.NUM_QUERY_SELLER_ACTIONS = self.SELLERS * self.ADVISERS
        self.NUM_QUERY_ADVISER_ACTIONS = self.ADVISERS ** 2
        self.NUM_BUY_ACTIONS = self.SELLERS
        self.NUM_DNB_ACTIONS = 1
        self.NUM_ACTIONS = self.NUM_QUERY_SELLER_ACTIONS + self.NUM_QUERY_ADVISER_ACTIONS + self.NUM_BUY_ACTIONS + self.NUM_DNB_ACTIONS
        self.observation_space = gym.spaces.Discrete(6)
        self.action_space = gym.spaces.Discrete(self.NUM_ACTIONS)
        self.penalty_functions = penalty_functions

    def step(self, action: int) -> (object, float, bool, dict):
        """Execute one action.

        :param action: index into the flat action space
        :return: (observation value, total reward, done flag, info dict)
        :raises ValueError: if ``action`` is outside the action space
        """
        total_rewards = 0
        for function in self.penalty_functions:
            total_rewards += function(action)
        if action < self.NUM_QUERY_SELLER_ACTIONS:  # Seller query
            consulted_adviser_index = action % self.ADVISERS
            consulted_adviser = self.advisers[consulted_adviser_index]
            seller_in_question_index = int(action / self.ADVISERS)
            seller_in_question = self.sellers[seller_in_question_index]
            total_rewards += SELLER_QUERY_REWARD
            return consulted_adviser.advise_on_seller(seller_in_question).value, total_rewards, False, {'actor_number': seller_in_question_index, 'adviser_index': self.SELLERS + consulted_adviser_index, 'state': self.__get_state()}
        elif action < self.NUM_QUERY_SELLER_ACTIONS + self.NUM_QUERY_ADVISER_ACTIONS:  # Adviser query
            consulted_adviser_index = (action - self.NUM_QUERY_SELLER_ACTIONS) % self.ADVISERS
            consulted_adviser = self.advisers[consulted_adviser_index]
            adviser_in_question_index = int((action - self.NUM_QUERY_SELLER_ACTIONS) / self.ADVISERS)
            adviser_in_question = self.advisers[adviser_in_question_index]
            total_rewards += ADVISER_QUERY_REWARD
            return consulted_adviser.advise_on_adviser(adviser_in_question).value, total_rewards, False, {'actor_number': self.SELLERS + adviser_in_question_index, 'adviser_index': self.SELLERS + consulted_adviser_index, 'state': self.__get_state()}
        elif action < self.NUM_QUERY_SELLER_ACTIONS + self.NUM_QUERY_ADVISER_ACTIONS + self.NUM_BUY_ACTIONS:  # Buy from seller
            chosen_seller = self.sellers[action - (self.NUM_QUERY_SELLER_ACTIONS + self.NUM_QUERY_ADVISER_ACTIONS)]
            outcome = chosen_seller.sell_product()
            if outcome:
                total_rewards += SATISFACTORY_OUTCOME_REWARD
            else:
                total_rewards += UNSATISFACTORY_OUTCOME_REWARD
            # NOTE: reset() runs before info is built, so info['state']
            # describes the *new* episode (original behavior, preserved).
            self.reset()
            return Observation.ended.value, total_rewards, True, {'state': self.__get_state()}
        elif action < self.NUM_ACTIONS:  # Do not buy (DNB)
            # BUGFIX: the original recursive check was missing its base-case
            # return, so it could never evaluate truthy and DNB was always
            # penalized.  Declining to buy is satisfactory exactly when no
            # good seller exists (there was no good deal to miss).
            good_sellers_exist = any(seller.good for seller in self.sellers)
            if good_sellers_exist:
                total_rewards += UNSATISFACTORY_OUTCOME_REWARD
            else:
                total_rewards += SATISFACTORY_OUTCOME_REWARD
            self.reset()
            return Observation.ended.value, total_rewards, True, {'state': self.__get_state()}
        else:
            # Out-of-range action: fail loudly instead of silently returning None.
            raise ValueError("action must be in [0, {})".format(self.NUM_ACTIONS))

    def reset(self):
        """Start a new episode with freshly shuffled actors.

        :return: the 'none' observation value
        """
        self.__generate_random_state()
        return Observation.none.value

    def render(self, mode='human'):
        """No visualization implemented."""
        pass

    def close(self):
        """Nothing to clean up."""
        pass

    def __generate_random_state(self):
        """Populate and shuffle the seller and adviser lists.

        Note: all good sellers alias one Seller instance (likewise for the
        other actor kinds); harmless because the actors hold no per-episode
        mutable state.
        """
        self.sellers = []
        self.advisers = []
        good_seller = Seller(True, GOOD_SELLER_GOOD_SELL_CHANCE)
        bad_seller = Seller(False, BAD_SELLER_GOOD_SELL_CHANCE)
        good_adviser = Adviser(True, GOOD_ADVISER_HONEST_CHANCE)
        bad_adviser = Adviser(False, BAD_ADVISER_HONEST_CHANCE)
        for _ in range(self.GOOD_SELLERS):
            self.sellers.append(good_seller)
        for _ in range(self.BAD_SELLERS):
            self.sellers.append(bad_seller)
        for _ in range(self.GOOD_ADVISERS):
            self.advisers.append(good_adviser)
        for _ in range(self.BAD_ADVISERS):
            self.advisers.append(bad_adviser)
        random.shuffle(self.sellers)
        random.shuffle(self.advisers)

    def __get_state(self) -> list:
        """Return the hidden state: each seller's goodness followed by each
        adviser's trustworthiness, in actor order."""
        state = []
        for seller in self.sellers:
            state.append(seller.good)
        for adviser in self.advisers:
            state.append(adviser.trustworthy)
        return state
# Observation enums to be returned by observation function
# Use Observation.<enum>.value to get associated number to convert to vector
class Observation(Enum):
    """Observation enums returned by the environment's observation function.

    Use ``Observation.<enum>.value`` to get the associated number (e.g. for
    conversion to a one-hot vector).
    """
    good = 0
    bad = 1
    trustworthy = 2
    untrustworthy = 3
    ended = 4
    none = 5
class Seller:
    """A seller whose products turn out good with a fixed probability."""

    def __init__(self, good: bool, good_prop: float):
        # ``good`` labels the seller's hidden type; ``good_prop`` is the
        # chance any individual sale yields a good product.
        self.good = good
        self.good_prop = good_prop

    def sell_product(self) -> bool:
        """Sell one product; True means the product was good."""
        roll = random.random()
        return roll < self.good_prop

    def __repr__(self):
        return f"Seller[Good Prop: {self.good_prop}]"
class Adviser:
    """An adviser who reports on sellers and other advisers, answering
    truthfully with probability ``right_prop``."""

    def __init__(self, trustworthy: bool, right_prop: float):
        # ``trustworthy`` is the adviser's hidden type; ``right_prop`` is
        # the chance a given answer is honest.
        self.trustworthy = trustworthy
        self.right_prop = right_prop

    def advise_on_adviser(self, other_adviser: 'Adviser') -> Observation:
        """Report whether ``other_adviser`` is trustworthy (honest answer
        when telling the truth, inverted answer otherwise)."""
        honest = random.random() < self.right_prop
        # The original (not A or B) and (A or not B) is boolean XNOR,
        # i.e. "honesty matches the other's trustworthiness".
        if honest == other_adviser.trustworthy:
            return Observation.trustworthy
        return Observation.untrustworthy

    def advise_on_seller(self, other_seller: Seller) -> Observation:
        """Report whether ``other_seller`` is good, honestly or inverted."""
        honest = random.random() < self.right_prop
        if honest == other_seller.good:
            return Observation.good
        return Observation.bad

    def __repr__(self):
        return f"Adviser[Right Prop: {self.right_prop}, Trustworthy: {self.trustworthy}]"
# Will round each to the nearest int, giving ties to the former variable.
# The first will also be rounded up to 2 (granted that b can be rounded down the same)
# The first will always be rounded up to 1 and b to 0 if fa + fb = 1
def round_with_minimum_one_tend_first(float_a, float_b):
    """Round both values to nearest integers while keeping the rounded sum
    from exceeding the exact sum, favoring the first value.

    Adjustments run in order: bump the second value to at least 1; bump the
    first to at least 1 (this overrides the second's minimum when both were
    0); bump the first to at least 2 while the second can stay >= 1.  After
    each bump, the other value is decremented if the sum grew too large.
    Final corrections nudge the sum back toward the exact total.
    """
    exact_total = float_a + float_b
    first = round(float_a)
    second = round(float_b)
    if second == 0:  # second should be at least 1
        second += 1
        if first + second > exact_total:
            first -= 1
    if first == 0:  # first should be at least 1 (overrides second's minimum)
        first += 1
        if first + second > exact_total:
            second -= 1
    if first == 1 and second > 1:  # first should be at least 2 when second stays >= 1
        first += 1
        if first + second > exact_total:
            second -= 1
    if first + second < exact_total:  # rounding left the sum too low
        first += 1
    if first + second > exact_total:  # rounding left the sum too high
        second -= 1
    return first, second
class CustomEnv(environment.Environment):
    """EasyRL wrapper around SaleEnv, exposing one-hot observation vectors."""
    displayName = 'Custom-SALE-POMDP'

    def __init__(self):
        super().__init__()
        self.env = SaleEnv()
        # CONSISTENCY FIX: the original stored the raw observation value
        # here but a one-hot vector in reset(); use one-hot in both places.
        self.state = to_one_hot(self.env.reset(), len(Observation))
        self.done = False
        self.total_rewards = 0
        # Derive sizes from the wrapped env instead of hard-coding (6,)/18,
        # so they stay correct if SaleEnv's configuration changes.
        self.state_size = (len(Observation),)
        self.action_size = self.env.NUM_ACTIONS

    def step(self, action):
        """Advance one timestep; stores the one-hot observation and done
        flag, and returns the reward."""
        observation, reward, self.done, info = self.env.step(action)
        self.state = to_one_hot(observation, len(Observation))
        return reward

    def reset(self):
        """Start a fresh episode, clearing the done flag and reward total."""
        self.state = to_one_hot(self.env.reset(), len(Observation))
        self.done = False
        self.total_rewards = 0

    def render(self):
        """No visualization; return a 1x1 placeholder image."""
        return Image.new('RGB', (1, 1), 'white')

    def sample_action(self):
        """Sample a uniformly random action from the wrapped action space."""
        return self.env.action_space.sample()
def to_one_hot(index: int, size: int):
    """Return a length-``size`` list with a 1 at ``index`` and 0 elsewhere.

    BUGFIX: the original compared with ``is``, which tests object identity;
    for ints that only works inside CPython's small-int cache (roughly
    -5..256), so large indices silently produced an all-zero vector.
    ``==`` compares values and is correct for any index.
    """
    return [1 if x == index else 0 for x in range(size)]
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,573 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/sarsa.py | from Agents.Collections import qTable
class sarsa(qTable.QTable):
    """Tabular SARSA (on-policy TD control) built on the QTable base."""
    displayName = 'SARSA'

    def __init__(self, *args):
        super().__init__(*args)
        # SARSA needs two consecutive (state, action) pairs; None marks
        # "no pending pair" (start of training or just after episode end).
        self.last_state = None
        self.last_action = None

    def remember(self, state, action, reward, new_state, _, done=False):
        """Apply one SARSA update using the previously stored (state, action).

        The first call after a reset/episode end only stores the pair and
        returns 0; subsequent calls update the Q-table entry for the
        previous pair and return the squared TD error.
        """
        td_error = 0
        have_previous = self.last_state is not None and self.last_action is not None
        if have_previous:
            prev_value = self.getQvalue(self.last_state, self.last_action)
            next_value = self.getQvalue(state, action)
            target = reward if done else reward + self.gamma * next_value
            td_error = target - prev_value
            self.qtable[(self.last_state, self.last_action)] = prev_value + self.alpha * td_error
        if done:
            # Episode over: drop the pending pair so the next episode
            # starts clean.
            self.last_state = None
            self.last_action = None
        else:
            self.last_state = state
            self.last_action = action
        return td_error ** 2

    def update(self):
        """No separate update step; learning happens inside remember()."""
        pass

    def __deepcopy__(self, memodict={}):
        """Deep copy is not supported; returns None."""
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,574 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/Policy/policy.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from Agents.Policy.approximator import Approximator
from abc import ABC, abstractmethod
from torch.distributions import Categorical
class Policy(ABC):
    """
    Interface for a policy backed by a function approximator.

    The approximator scores each action for a given state; concrete
    subclasses turn those scores into a probability distribution and
    sample actions from it.

    Adapted from 'https://github.com/zafarali/policy-gradient-methods/
    blob/f0d83a80ddc772dcad0c851aac9bfd41d436c274/pg_methods/policies.py'.
    """

    def __init__(self, approximator):
        """
        :param approximator: the function approximator this policy delegates to
        :type approximator: Agents.Policy.approximator.Approximator
        :raises ValueError: if ``approximator`` is not an Approximator
        """
        if not isinstance(approximator, Approximator):
            raise ValueError("approximator must be an instance of Agents.Policy.approximator.Approximator.")
        self._approximator = approximator

    def count_params(self):
        """Return the number of parameters in the underlying approximator.

        :return: the parameter count
        :rtype: int
        """
        return self._approximator.count_params()

    def get_params(self, flatten: bool = True):
        """Return the approximator's parameters (per-layer weights and biases).

        :param flatten: if True (the default), return a one-dimensional
            numpy array; otherwise use the underlying model's native format
        :type flatten: bool
        :return: the parameters used by the approximator
        """
        return self._approximator.get_params(flatten)

    def set_params(self, params: np.ndarray):
        """Overwrite this policy's parameters from a flat numpy array.

        The array's length must equal :meth:`count_params`.

        :param params: the new parameter values
        :type params: numpy.ndarray
        """
        self._approximator.set_params(params)

    def zero_grad(self):
        """Zero out the approximator's gradient."""
        self._approximator.zero_grad()

    @abstractmethod
    def choose_action(self, state: np.ndarray):
        """Sample an action for ``state`` from this policy's distribution.

        :param state: the state to choose an action for
        :type state: numpy.ndarray
        :return: the chosen action
        :rtype: int
        """
        pass

    @abstractmethod
    def get_distribution(self, states: np.ndarray, detach: bool = True):
        """Build this policy's probability distribution for each given state.

        :param states: an array of states to build the distribution from
        :type states: np.ndarray
        :param detach: whether to detach the result from the tensor graph
            (True by default)
        :type detach: bool
        :return: the probability distribution of this policy
        :rtype: torch.distribution
        """
        pass

    @abstractmethod
    def logit(self, state: np.ndarray, action: int):
        """Compute the log-likelihood of taking ``action`` in ``state``.

        :param state: the current state
        :type state: numpy.array
        :param action: the action being taken
        :type action: int
        :return: the log-likelihood of taking the action
        :rtype: float
        """
        pass
class CategoricalPolicy(Policy):
    """
    A categorical policy, used for choosing from a discrete range of
    actions: approximated action values are pushed through a softmax to
    form a Categorical distribution over actions.
    """

    def choose_action(self, state: np.ndarray, detach: bool = True):
        """
        Chooses an action by approximating the value of each action,
        creating a probability distribution from those values, and
        sampling the action from that probability distribution.

        :param state: the state to choose an action for
        :type state: numpy.ndarray
        :param detach: if True, return the action as a plain Python int
            instead of a tensor. Set to True as default.
        :type detach: bool
        :return: the chosen action
        :rtype: torch.Tensor or int
        """
        # Approximate the value of each action.
        values = self._approximator(state)
        # Use softmax to determine the probability from each value.
        probs = F.softmax(values, dim=-1)
        # Create a categorical policy distribution from the probabilities.
        policy_dist = Categorical(probs)
        # Sample an action from the policy distribution.
        action = policy_dist.sample()
        # If detach is true, extract the plain Python value.
        if (detach):
            action = action.item()
        return action

    def get_distribution(self, states: np.ndarray, detach: bool = True):
        """
        Creates a policy distribution given an array of states.

        :param states: an array of states to create the policy distribution.
        :type states: np.ndarray
        :param detach: determines whether to detach the probabilities from
            the autograd graph or not. Set to True as default.
        :type detach: bool
        :return: the probability distribution of this policy.
        :rtype: torch.distributions.Categorical
        :raises ValueError: if states is not a numpy array whose trailing
            dimensions match the approximator's state shape.
        """
        if (not isinstance(states, np.ndarray) or states.shape[1:] != self._approximator.state_size):
            # BUGFIX: the message previously formatted self.state_size, which
            # is not the attribute the check above compares against and would
            # raise AttributeError instead of the intended ValueError.
            raise ValueError("states must be a numpy array with each state having the shape {}.".format(self._approximator.state_size))
        # Approximate the value of each action for each state given.
        values = []
        for state in states:
            approx_values = self._approximator(state)
            values.append(approx_values)
        values = torch.stack(values)
        # Use softmax to determine the probability from each value.
        probs = F.softmax(values, dim=-1)
        # If detach is true, then detach the result from the tensor.
        if detach:
            probs = probs.detach()
        # Create a categorical policy distribution from the probabilities.
        policy_dist = Categorical(probs)
        return policy_dist

    def logit(self, state: np.ndarray, action: int, detach: bool = True):
        """
        Computes the log-likelihood of taking the given action, given the
        state.

        :param state: the current state
        :type state: numpy.ndarray
        :param action: the action being taken
        :type action: int
        :param detach: if True, return the log-likelihood as a plain
            float instead of a tensor. Set to True as default.
        :type detach: bool
        :return: the log-likelihood of taking the action
        :rtype: torch.Tensor or float
        :raises ValueError: if action is not an int within the action space.
        """
        if (not isinstance(action, int) or action not in range(self._approximator.action_size)):
            raise ValueError("action must be an integer from the action space.")
        # Approximate the value of each action.
        values = self._approximator(state)
        # Use softmax to determine the probability from each value.
        probs = F.softmax(values, dim=-1)
        # Create a categorical policy distribution from the probabilities.
        policy_dist = Categorical(probs)
        # log_prob expects a tensor of actions.
        action = torch.tensor([action])
        # Calculate the log-likelihood of taking the given action.
        logit = policy_dist.log_prob(action)
        # If detach is true, extract the plain Python value.
        if (detach):
            logit = logit.item()
        return logit
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,575 | RobertCordingly/easyRL-v0 | refs/heads/master | /lambda/download.py | # Kills in current instance.
# By Robert Cordingly
import boto3
import sys
def listInstances(ec2Client):
    """Return every EC2 instance visible to the given client.

    Uses the describe_instances paginator so that accounts with more
    instances than one API page (previously only the first response was
    read) are fully enumerated.

    :param ec2Client: a boto3 EC2 client
    :return: list of instance dicts, flattened across all reservations
    """
    instances = []
    paginator = ec2Client.get_paginator('describe_instances')
    for page in paginator.paginate():
        for reservation in page["Reservations"]:
            instances.extend(reservation["Instances"])
    return instances
def findOurInstance(ec2Client, jobID):
    """Locate the pending/running instance tagged with the given job id.

    :param ec2Client: a boto3 EC2 client
    :param jobID: job identifier to match against the 'jobID' tag
    :return: the matching instance dict, or None if none is found
    """
    wanted = str(jobID)
    for candidate in listInstances(ec2Client):
        # Skip instances without tags/state metadata.
        if 'Tags' not in candidate or 'State' not in candidate:
            continue
        # Only pending or running instances are of interest.
        if candidate['State']['Name'] not in ('pending', 'running'):
            continue
        for tag in candidate['Tags']:
            if tag['Key'] == 'jobID' and tag['Value'] == wanted:
                return candidate
    return None
def terminateInstance(ec2Client, ec2Resource, ourInstance):
    """Terminate the given instance; a None instance is a no-op.

    :param ec2Client: unused here, kept for call-site symmetry
    :param ec2Resource: a boto3 EC2 resource
    :param ourInstance: instance dict (needs 'InstanceId') or None
    """
    if ourInstance is None:
        return
    ec2Resource.Instance(ourInstance['InstanceId']).terminate()
# --- CLI entry point -------------------------------------------------
# Usage: download.py <path> <jobID> <accessKey> <secretKey> [sessionToken]
# Downloads the file stored under <path> in the per-job S3 bucket
# ('easyrl-<jobID>') to the same local path.
path = sys.argv[1]
jobID = sys.argv[2]
accessKey = sys.argv[3]
secretKey = sys.argv[4]
# A session token is only supplied for temporary (STS) credentials.
if (len(sys.argv) == 6):
    sessionToken = sys.argv[5]
else:
    sessionToken = ""
botoSession = boto3.Session (
    aws_access_key_id = accessKey,
    aws_secret_access_key = secretKey,
    aws_session_token = sessionToken,
    region_name = 'us-east-1'
)
s3Client = botoSession.client('s3')
s3Client.download_file('easyrl-' + str(jobID), path, path)
75,576 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/reinforceNative.py | from Agents import modelFreeAgent
import numpy as np
from collections import deque
import random
import joblib
import cffi
import os
import pathlib
import platform
import importlib
class ReinforceNative(modelFreeAgent.ModelFreeAgent):
    """REINFORCE agent backed by a native (compiled) implementation,
    accessed through a cffi-built extension module.

    The Python side only marshals states/actions across the FFI
    boundary; the learning itself happens in the native library.
    """
    displayName = 'Reinforce Native'
    newParameters = [modelFreeAgent.ModelFreeAgent.Parameter('Policy learning rate', 0.00001, 1, 0.00001, 0.001, True, True,
                                                             "A learning rate that the Adam optimizer starts at")]
    parameters = modelFreeAgent.ModelFreeAgent.parameters + newParameters

    def __init__(self, *args):
        # The trailing args belong to this subclass; strip them before
        # delegating the rest to the base agent.
        paramLen = len(ReinforceNative.newParameters)
        super().__init__(*args[:-paramLen])
        (self.policy_lr,) = [arg for arg in args[-paramLen:]]
        # The native sources/artifacts live next to this agent; chdir there
        # so the relative library paths used by cffi resolve, then restore.
        oldwd = pathlib.Path().absolute()
        curDir = oldwd / "Agents/Native/reinforceNative"
        os.chdir(curDir.as_posix())
        self.ffi = cffi.FFI()
        # Build the extension on first use. Windows builds land in Release/.
        if platform.system() == "Windows":
            if not importlib.util.find_spec("Agents.Native.reinforceNative.Release._reinforceNative"):
                self.compileLib(curDir)
            import Agents.Native.reinforceNative.Release._reinforceNative as _reinforceNative
        else:
            if not importlib.util.find_spec("Agents.Native.reinforceNative._reinforceNative"):
                self.compileLib(curDir)
            import Agents.Native.reinforceNative._reinforceNative as _reinforceNative
        self.nativeInterface = _reinforceNative.lib
        # Opaque handle to the native agent instance.
        self.nativeReinforce = self.nativeInterface.createAgentc(self.state_size[0], self.action_size,
                                                                 self.policy_lr, self.gamma)
        os.chdir(oldwd.as_posix())

    def compileLib(self, curDir):
        """Compile the cffi extension from reinforce.h and the prebuilt
        native library in curDir (or curDir/Release on Windows)."""
        headerName = curDir / "reinforce.h"
        outputDir = (curDir / "Release") if platform.system() == "Windows" else curDir
        with open(headerName) as headerFile:
            self.ffi.cdef(headerFile.read())
        self.ffi.set_source(
            "_reinforceNative",
            """
            #include "reinforce.h"
            """,
            libraries=["reinforceNative"],
            library_dirs=[outputDir.as_posix()],
            include_dirs=[curDir.as_posix()]
        )
        self.ffi.compile(verbose=True, tmpdir=outputDir)

    def __del__(self):
        # Release the native agent to avoid leaking its memory.
        self.nativeInterface.freeAgentc(self.nativeReinforce)

    def choose_action(self, state):
        """Ask the native agent for an action for the given state."""
        cState = self.ffi.new("float[]", list(state))
        action = self.nativeInterface.chooseActionc(self.nativeReinforce, cState)
        return action

    def remember(self, state, action, reward, new_state, done=False):
        """Feed one transition to the native agent; returns its loss.

        NOTE(review): new_state is accepted but not forwarded to the
        native side (see the commented-out cNewState line).
        """
        cState = self.ffi.new("float[]", list(state))
        #cNewState = self.ffi.new("float[]", new_state)
        loss = self.nativeInterface.rememberc(self.nativeReinforce, cState, action, reward, done)
        return loss

    # The remaining agent-interface hooks are intentionally no-ops for
    # the native implementation (state lives on the native side).
    def update(self):
        pass

    def reset(self):
        pass

    def __deepcopy__(self, memodict={}):
        pass

    def save(self, filename):
        pass

    def load(self, filename):
        pass

    def memsave(self):
        pass

    def memload(self, mem):
        pass
75,577 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/modelBasedAgent.py | from Agents import agent
from abc import ABC
class ModelBasedAgent(agent.Agent, ABC):
    """Abstract base class for model-based agents.

    Currently adds no parameters of its own; the newParameters /
    parameters pair is kept for symmetry with other agent subclasses.
    """
    displayName = 'Model Based Agent'
    newParameters = []
    parameters = agent.Agent.parameters + newParameters
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,578 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/ppoNative.py | from Agents import modelFreeAgent
import numpy as np
from collections import deque
import random
import joblib
import cffi
import os
import pathlib
import platform
import importlib
class PPONative(modelFreeAgent.ModelFreeAgent):
    """PPO agent backed by a native (compiled) implementation, accessed
    through a cffi-built extension module.

    The Python side only marshals states/actions across the FFI
    boundary; training happens in the native library.
    """
    displayName = 'PPO Native'
    newParameters = [modelFreeAgent.ModelFreeAgent.Parameter('Batch Size', 1, 256, 1, 32, True, True, "The number of transitions to consider simultaneously when updating the agent"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Policy learning rate', 0.00001, 1, 0.00001, 0.001, True, True,
                                                             "A learning rate that the Adam optimizer starts at"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Value learning rate', 0.00001, 1, 0.00001, 0.001,
                                                             True, True,
                                                             "A learning rate that the Adam optimizer starts at"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Horizon', 10, 10000, 1, 50,
                                                             True, True,
                                                             "The number of timesteps over which the returns are calculated"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Epoch Size', 10, 100000, 1, 500,
                                                             True, True,
                                                             "The length of each epoch (likely should be the same as the max episode length)"),
                     modelFreeAgent.ModelFreeAgent.Parameter('PPO Epsilon', 0.00001, 0.5, 0.00001, 0.2,
                                                             True, True,
                                                             "A measure of how much a policy can change w.r.t. the states it's trained on"),
                     modelFreeAgent.ModelFreeAgent.Parameter('PPO Lambda', 0.5, 1, 0.001, 0.95,
                                                             True, True,
                                                             "A parameter that when set below 1, can decrease variance while maintaining reasonable bias")]
    parameters = modelFreeAgent.ModelFreeAgent.parameters + newParameters

    def __init__(self, *args):
        # The trailing args belong to this subclass; strip them before
        # delegating the rest to the base agent.
        paramLen = len(PPONative.newParameters)
        super().__init__(*args[:-paramLen])
        # Two positional unpacks over the same slice: the first keeps the
        # integer-valued parameters, the second the float-valued ones.
        self.batch_size, _, _, self.horizon, self.epochSize, _, _ = [int(arg) for arg in args[-paramLen:]]
        _, self.policy_lr, self.value_lr, _, _, self.epsilon, self.lam = [arg for arg in args[-paramLen:]]
        # The native sources/artifacts live next to this agent; chdir there
        # so the relative library paths used by cffi resolve, then restore.
        oldwd = pathlib.Path().absolute()
        curDir = oldwd / "Agents/Native/ppoNative"
        os.chdir(curDir.as_posix())
        self.ffi = cffi.FFI()
        # Build the extension on first use. Windows builds land in Release/.
        if platform.system() == "Windows":
            if not importlib.util.find_spec("Agents.Native.ppoNative.Release._ppoNative"):
                self.compileLib(curDir)
            import Agents.Native.ppoNative.Release._ppoNative as _ppoNative
        else:
            if not importlib.util.find_spec("Agents.Native.ppoNative._ppoNative"):
                self.compileLib(curDir)
            import Agents.Native.ppoNative._ppoNative as _ppoNative
        self.nativeInterface = _ppoNative.lib
        # Opaque handle to the native agent instance.
        self.nativeppo = self.nativeInterface.createAgentc(self.state_size[0], self.action_size,
                                                           self.policy_lr, self.value_lr, self.gamma, self.horizon, self.epochSize,
                                                           self.batch_size, self.epsilon, self.lam)
        os.chdir(oldwd.as_posix())

    def compileLib(self, curDir):
        """Compile the cffi extension from ppoNative.h and the prebuilt
        native library in curDir (or curDir/Release on Windows)."""
        headerName = curDir / "ppoNative.h"
        outputDir = (curDir / "Release") if platform.system() == "Windows" else curDir
        with open(headerName) as headerFile:
            self.ffi.cdef(headerFile.read())
        self.ffi.set_source(
            "_ppoNative",
            """
            #include "ppoNative.h"
            """,
            libraries=["ppoNative"],
            library_dirs=[outputDir.as_posix()],
            include_dirs=[curDir.as_posix()]
        )
        self.ffi.compile(verbose=True, tmpdir=outputDir)

    def __del__(self):
        # Release the native agent to avoid leaking its memory.
        self.nativeInterface.freeAgentc(self.nativeppo)

    def choose_action(self, state):
        """Ask the native agent for an action for the given state."""
        cState = self.ffi.new("float[]", list(state))
        action = self.nativeInterface.chooseActionc(self.nativeppo, cState)
        return action

    def remember(self, state, action, reward, new_state, done=False):
        """Feed one transition to the native agent; returns its loss.

        NOTE(review): new_state is accepted but not forwarded to the
        native side (see the commented-out cNewState line).
        """
        cState = self.ffi.new("float[]", list(state))
        #cNewState = self.ffi.new("float[]", new_state)
        loss = self.nativeInterface.rememberc(self.nativeppo, cState, action, reward, done)
        return loss

    # The remaining agent-interface hooks are intentionally no-ops for
    # the native implementation (state lives on the native side).
    def update(self):
        pass

    def reset(self):
        pass

    def __deepcopy__(self, memodict={}):
        pass

    def save(self, filename):
        pass

    def load(self, filename):
        pass

    def memsave(self):
        pass

    def memload(self, mem):
        pass
75,579 | RobertCordingly/easyRL-v0 | refs/heads/master | /EasyRL.py | from MVC import controller
import sys
# Application entry point: hand the raw CLI arguments to the MVC
# controller, which selects and starts the appropriate front end.
controller.Controller(sys.argv)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,580 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/deepQ.py | import joblib
import numpy as np
import random
from Agents import modelFreeAgent
from Agents.Collections import ExperienceReplay
from Agents.Collections.TransitionFrame import TransitionFrame
class DeepQ(modelFreeAgent.ModelFreeAgent):
    """Deep Q-learning agent.

    Maintains an online Q-network (``model``) and a periodically synced
    target network (``target``). Both are Keras models taking a state
    plus an action mask and outputting masked Q-values. Transitions are
    stored in a replay buffer and learned from in mini-batches.

    Changes from the previous revision: debug ``print`` statements that
    dumped every training batch (and every target sync) to stdout were
    removed, along with a dead tuple-unpacking in calculateTargetValues.
    """
    displayName = 'Deep Q'
    newParameters = [modelFreeAgent.ModelFreeAgent.Parameter('Batch Size', 1, 256, 1, 32, True, True, "The number of transitions to consider simultaneously when updating the agent"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Memory Size', 1, 655360, 1, 1000, True, True, "The maximum number of timestep transitions to keep stored"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Target Update Interval', 1, 100000, 1, 200, True, True, "The distance in timesteps between target model updates")]
    parameters = modelFreeAgent.ModelFreeAgent.parameters + newParameters

    def __init__(self, *args):
        # The trailing args are this class's own parameters; the rest go
        # to the base agent.
        paramLen = len(DeepQ.newParameters)
        super().__init__(*args[:-paramLen])
        self.batch_size, self.memory_size, self.target_update_interval = [int(arg) for arg in args[-paramLen:]]
        self.model = self.buildQNetwork()
        self.target = self.buildQNetwork()
        empty_state = self.get_empty_state()
        self.memory = ExperienceReplay.ReplayBuffer(self, self.memory_size, TransitionFrame(empty_state, -1, 0, empty_state, False))
        self.total_steps = 0
        # All-ones masks let every action's Q-value through the model's
        # multiply layer (single-sample and batch variants).
        self.allMask = np.full((1, self.action_size), 1)
        self.allBatchMask = np.full((self.batch_size, self.action_size), 1)

    def choose_action(self, state):
        """Return the greedy action for *state*.

        NOTE(review): epsilon is computed but currently unused — the
        epsilon-greedy branch is commented out (see TODO below), so the
        agent always exploits here.
        """
        qval = self.predict(state, False)
        epsilon = self.min_epsilon + (self.max_epsilon - self.min_epsilon) * np.exp(-self.decay_rate * self.time_steps)
        # TODO: Put epsilon at a level near this
        # if random.random() > epsilon:
        action = np.argmax(qval)
        # else:
        #     action = self.state_size.sample()
        return action

    def sample(self):
        """Draw a mini-batch (indices, transitions) from replay memory."""
        return self.memory.sample(self.batch_size)

    def addToMemory(self, state, action, reward, new_state, done):
        """Store a single transition in the replay buffer."""
        self.memory.append_frame(TransitionFrame(state, action, reward, new_state, done))

    def remember(self, state, action, reward, new_state, done=False):
        """Record a transition and, once the buffer holds two full
        batches, train the online network on a sampled mini-batch.

        :return: the training loss (0 until warm-up completes)
        """
        self.addToMemory(state, action, reward, new_state, done)
        loss = 0
        if len(self.memory) < 2*self.batch_size:
            return loss
        batch_idxes, mini_batch = self.sample()
        X_train, Y_train = self.calculateTargetValues(mini_batch)
        loss = self.model.train_on_batch(X_train, Y_train)
        '''
        If the memory is PrioritiedReplayBuffer then calculate the loss and
        update the priority of the sampled transitions
        '''
        if (isinstance(self.memory, ExperienceReplay.PrioritizedReplayBuffer)):
            # Calculate the loss of the batch as the TD error
            td_errors = self.compute_loss(mini_batch, np.amax(Y_train, axis = 1))
            # Update the priorities.
            for idx, td_error in zip(batch_idxes, td_errors):
                self.memory.update_error(idx, td_error)
        self.updateTarget()
        return loss

    def updateTarget(self):
        """Sync the target network with the online weights every
        ``target_update_interval`` steps (after warm-up), then advance
        the step counter."""
        if self.total_steps >= 2*self.batch_size and self.total_steps % self.target_update_interval == 0:
            self.target.set_weights(self.model.get_weights())
        self.total_steps += 1

    def predict(self, state, isTarget):
        """Return Q-values for one state from the target (isTarget=True)
        or online network."""
        shape = (1,) + self.state_size
        state = np.reshape(state, shape)
        if isTarget:
            result = self.target.predict([state, self.allMask])
        else:
            result = self.model.predict([state, self.allMask])
        return result

    def update(self):
        # No per-step bookkeeping needed for this agent.
        pass

    def reset(self):
        # No episode-level state to reset.
        pass

    def create_one_hot(self, vector_length, hot_index):
        """Return a one-hot vector of the given length; a hot_index of
        -1 yields an all-zero vector (used for the 'no action' frame)."""
        output = np.zeros((vector_length))
        if hot_index != -1:
            output[hot_index] = 1
        return output

    def buildQNetwork(self):
        """Build a Keras Q-network: Flatten -> Dense(24) -> Dense(24) ->
        Dense(action_size), with the output elementwise-multiplied by an
        action-mask input so only selected actions contribute."""
        from tensorflow.python.keras.optimizer_v2.adam import Adam
        from tensorflow.keras.models import Model
        from tensorflow.keras.layers import Dense, Input, Flatten, multiply
        inputA = Input(shape=self.state_size)
        inputB = Input(shape=(self.action_size,))
        x = Flatten()(inputA)
        x = Dense(24, input_dim=self.state_size, activation='relu')(x)  # fully connected
        x = Dense(24, activation='relu')(x)
        x = Dense(self.action_size, activation='linear')(x)
        outputs = multiply([x, inputB])
        model = Model(inputs=[inputA, inputB], outputs=outputs)
        model.compile(loss='mse', optimizer=Adam(lr=0.001))
        return model

    def calculateTargetValues(self, mini_batch):
        """Assemble (X_train, Y_train) for a train_on_batch update.

        X_train pairs states with one-hot action masks; Y_train carries
        the TD target at the taken action's index and zero elsewhere,
        matching the mask-multiplied network output.
        """
        X_train = [np.zeros((self.batch_size,) + self.state_size), np.zeros((self.batch_size,) + (self.action_size,))]
        next_states = np.zeros((self.batch_size,) + self.state_size)
        for index_rep, transition in enumerate(mini_batch):
            X_train[0][index_rep] = transition.state
            X_train[1][index_rep] = self.create_one_hot(self.action_size, transition.action)
            next_states[index_rep] = transition.next_state
        Y_train = np.zeros((self.batch_size,) + (self.action_size,))
        qnext = self.target.predict([next_states, self.allBatchMask])
        qnext = np.amax(qnext, 1)
        for index_rep, transition in enumerate(mini_batch):
            if transition.is_done:
                # Terminal transition: target is just the reward.
                Y_train[index_rep][transition.action] = transition.reward
            else:
                Y_train[index_rep][transition.action] = transition.reward + qnext[index_rep] * self.gamma
        return X_train, Y_train

    def compute_loss(self, mini_batch, q_target: list = None):
        """
        Computes the loss of each sample in the mini_batch. The loss is
        calculated as the TD Error of the Q-Network. Will use the given
        list of q_target values if provided instead of calculating.

        :param mini_batch: is the mini batch to compute the loss of.
        :param q_target: is a list of q_target values to use in the
            calculation of the loss. This is optional. The q_target values
            will be calculated if q_target is not provided.
        :type q_target: list
        """
        # Get the states from the batch.
        states = np.zeros((self.batch_size,) + self.state_size)
        for batch_idx, transition in enumerate(mini_batch):
            states[batch_idx] = transition.state
        # Get the actions from the batch.
        actions = [transition.action for transition in mini_batch]
        '''
        If the q_target is None then calculate the target q-value using the
        target QNetwork.
        '''
        if (q_target is None):
            next_states = np.zeros((self.batch_size,) + self.state_size)
            for batch_idx, transition in enumerate(mini_batch):
                next_states[batch_idx] = transition.next_state
            rewards = [transition.reward for transition in mini_batch]
            is_dones = np.array([transition.is_done for transition in mini_batch]).astype(float)
            q_target = self.target.predict([next_states, self.allBatchMask])
            q_target = rewards + (1 - is_dones) * self.gamma * np.amax(q_target, 1)
        # Get from the current q-values from the QNetwork.
        q = self.model.predict([states, self.allBatchMask])
        q = np.choose(actions, q.T)
        # Calculate and return the loss (TD Error).
        loss = (q_target - q) ** 2
        return loss

    def apply_hindsight(self):
        '''
        The hindsight replay buffer method checks for
        the instance, if instance found add to the memory
        '''
        if (isinstance(self.memory, ExperienceReplay.HindsightReplayBuffer)):
            self.memory.apply_hindsight()

    def __deepcopy__(self, memodict={}):
        # Deep copies of agents are not supported.
        pass

    def save(self, filename):
        """Persist the online network weights (tagged with the agent
        display name) to *filename*."""
        mem = self.model.get_weights()
        joblib.dump((DeepQ.displayName, mem), filename)

    def load(self, filename):
        """Load weights saved by save(); refuses files from other agents."""
        name, mem = joblib.load(filename)
        if name != DeepQ.displayName:
            print('load failed')
        else:
            self.model.set_weights(mem)
            self.target.set_weights(mem)

    def memsave(self):
        """Return the online network weights (in-memory snapshot)."""
        return self.model.get_weights()

    def memload(self, mem):
        """Restore both networks from an in-memory weight snapshot."""
        self.model.set_weights(mem)
        self.target.set_weights(mem)
class DeepQPrioritized(DeepQ):
    """Deep Q variant using a prioritized experience replay buffer."""
    displayName = 'Deep Q Prioritized'
    newParameters = [DeepQ.Parameter('Alpha', 0.00, 1.00, 0.001, 0.60, True, True, "The amount of prioritization that gets used.")]
    parameters = DeepQ.parameters + newParameters

    def __init__(self, *args):
        # Strip this subclass's trailing parameter before delegating.
        paramLen = len(DeepQPrioritized.newParameters)
        super().__init__(*args[:-paramLen])
        # args[-paramLen] is args[-1]: the prioritization exponent alpha.
        self.alpha = float(args[-paramLen])
        empty_state = self.get_empty_state()
        # Replace the base class's plain replay buffer with a prioritized one.
        self.memory = ExperienceReplay.PrioritizedReplayBuffer(self, self.memory_size, TransitionFrame(empty_state, -1, 0, empty_state, False), alpha = self.alpha)
class DeepQHindsight(DeepQ):
    """Deep Q variant using a hindsight experience replay buffer.

    Adds no parameters of its own; the unused ``paramLen`` local that
    previously shadowed this fact has been removed.
    """
    displayName = 'Deep Q Hindsight'
    newParameters = []
    parameters = DeepQ.parameters + newParameters

    def __init__(self, *args):
        super().__init__(*args)
        empty_state = self.get_empty_state()
        # Replace the base class's plain replay buffer with a hindsight one.
        self.memory = ExperienceReplay.HindsightReplayBuffer(self, self.memory_size, TransitionFrame(empty_state, -1, 0, empty_state, False))
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,581 | RobertCordingly/easyRL-v0 | refs/heads/master | /Environments/atariEnv.py | from Environments import environment
import gym
from PIL import Image
import cv2
import numpy as np
from abc import ABC
import random
class AtariEnv(environment.Environment, ABC):
    """Common base for the Atari game environments.

    Wraps a gym Atari env and presents each observation as an 84x84
    single-channel greyscale image.
    """
    displayName = 'AtariEnv'
    subEnvs = []  # populated by each concrete subclass at import time

    def __init__(self):
        # Target observation geometry: 84x84, one greyscale channel.
        self.image_width = 84
        self.image_height = 84
        self.state_size = (self.image_width, self.image_height, 1)
        # Filled in by subclasses and by reset().
        self.env = None
        self.state = None
        self.rawImg = None
        self.done = None
        self.total_rewards = None

    def step(self, action):
        """Advance the wrapped env one step; returns the raw reward."""
        obs, reward, self.done, info = self.env.step(action)
        self.state = self.preprocess(obs)
        return reward

    def reset(self):
        """Reset the wrapped env and clear per-episode bookkeeping."""
        self.rawImg = self.env.reset()
        self.state = self.preprocess(self.rawImg)
        self.done = False
        self.total_rewards = 0

    def sample_action(self):
        """Draw a uniformly random action from the env's action space."""
        return self.env.action_space.sample()

    def preprocess(self, image):
        """Convert a raw RGB frame into the greyscale state tensor,
        keeping the raw frame around for render()."""
        self.rawImg = image
        grey = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        shrunk = cv2.resize(grey, (self.image_width, self.image_height), interpolation=cv2.INTER_AREA)
        return np.reshape(shrunk, self.state_size)

    def render(self, mode='RGB'):
        """Return the last raw frame as a PIL RGB image."""
        return Image.fromarray(self.rawImg.astype('uint8'), 'RGB')
# --- Concrete Atari environments (Adventure..Assault) ----------------
# Each subclass wraps one gym Atari game and registers itself in
# AtariEnv.subEnvs at import time.
class adventureEnv(AtariEnv):
    displayName = 'adventure'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Adventure-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(adventureEnv)

class air_raidEnv(AtariEnv):
    displayName = 'air_raid'

    def __init__(self):
        super().__init__()
        self.env = gym.make('AirRaid-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(air_raidEnv)

class alienEnv(AtariEnv):
    displayName = 'alien'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Alien-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(alienEnv)

class amidarEnv(AtariEnv):
    displayName = 'amidar'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Amidar-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(amidarEnv)

class assaultEnv(AtariEnv):
    displayName = 'assault'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Assault-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(assaultEnv)
# --- Concrete Atari environments (Asterix..BeamRider) -----------------
# Same registration pattern as above.
class asterixEnv(AtariEnv):
    displayName = 'asterix'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Asterix-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(asterixEnv)

class asteroidsEnv(AtariEnv):
    displayName = 'asteroids'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Asteroids-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(asteroidsEnv)

class atlantisEnv(AtariEnv):
    displayName = 'atlantis'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Atlantis-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(atlantisEnv)

class bank_heistEnv(AtariEnv):
    displayName = 'bank_heist'

    def __init__(self):
        super().__init__()
        self.env = gym.make('BankHeist-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(bank_heistEnv)

class battle_zoneEnv(AtariEnv):
    displayName = 'battle_zone'

    def __init__(self):
        super().__init__()
        self.env = gym.make('BattleZone-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(battle_zoneEnv)

class beam_riderEnv(AtariEnv):
    displayName = 'beam_rider'

    def __init__(self):
        super().__init__()
        self.env = gym.make('BeamRider-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(beam_riderEnv)
# --- Concrete Atari environments (Berzerk..Centipede) -----------------
# Same registration pattern as above.
class berzerkEnv(AtariEnv):
    displayName = 'berzerk'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Berzerk-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(berzerkEnv)

class bowlingEnv(AtariEnv):
    displayName = 'bowling'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Bowling-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(bowlingEnv)

class boxingEnv(AtariEnv):
    displayName = 'boxing'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Boxing-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(boxingEnv)

class breakoutEnv(AtariEnv):
    displayName = 'breakout'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Breakout-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(breakoutEnv)

class carnivalEnv(AtariEnv):
    displayName = 'carnival'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Carnival-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(carnivalEnv)

class centipedeEnv(AtariEnv):
    displayName = 'centipede'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Centipede-v0')
        self.action_size = self.env.action_space.n

AtariEnv.subEnvs.append(centipedeEnv)
class chopper_commandEnv(AtariEnv):
displayName = 'chopper_command'
def __init__(self):
super().__init__()
self.env = gym.make('ChopperCommand-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(chopper_commandEnv)
class crazy_climberEnv(AtariEnv):
displayName = 'crazy_climber'
def __init__(self):
super().__init__()
self.env = gym.make('CrazyClimber-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(crazy_climberEnv)
class demon_attackEnv(AtariEnv):
displayName = 'demon_attack'
def __init__(self):
super().__init__()
self.env = gym.make('DemonAttack-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(demon_attackEnv)
class double_dunkEnv(AtariEnv):
displayName = 'double_dunk'
def __init__(self):
super().__init__()
self.env = gym.make('DoubleDunk-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(double_dunkEnv)
class elevator_actionEnv(AtariEnv):
displayName = 'elevator_action'
def __init__(self):
super().__init__()
self.env = gym.make('ElevatorAction-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(elevator_actionEnv)
class enduroEnv(AtariEnv):
displayName = 'enduro'
def __init__(self):
super().__init__()
self.env = gym.make('Enduro-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(enduroEnv)
class fishing_derbyEnv(AtariEnv):
displayName = 'fishing_derby'
def __init__(self):
super().__init__()
self.env = gym.make('FishingDerby-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(fishing_derbyEnv)
class freewayEnv(AtariEnv):
displayName = 'freeway'
def __init__(self):
super().__init__()
self.env = gym.make('Freeway-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(freewayEnv)
class frostbiteEnv(AtariEnv):
displayName = 'frostbite'
def __init__(self):
super().__init__()
self.env = gym.make('Frostbite-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(frostbiteEnv)
class gopherEnv(AtariEnv):
displayName = 'gopher'
def __init__(self):
super().__init__()
self.env = gym.make('Gopher-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(gopherEnv)
class gravitarEnv(AtariEnv):
displayName = 'gravitar'
def __init__(self):
super().__init__()
self.env = gym.make('Gravitar-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(gravitarEnv)
class heroEnv(AtariEnv):
displayName = 'hero'
def __init__(self):
super().__init__()
self.env = gym.make('Hero-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(heroEnv)
class ice_hockeyEnv(AtariEnv):
displayName = 'ice_hockey'
def __init__(self):
super().__init__()
self.env = gym.make('IceHockey-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(ice_hockeyEnv)
class jamesbondEnv(AtariEnv):
displayName = 'jamesbond'
def __init__(self):
super().__init__()
self.env = gym.make('Jamesbond-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(jamesbondEnv)
class journey_escapeEnv(AtariEnv):
displayName = 'journey_escape'
def __init__(self):
super().__init__()
self.env = gym.make('JourneyEscape-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(journey_escapeEnv)
class kangarooEnv(AtariEnv):
displayName = 'kangaroo'
def __init__(self):
super().__init__()
self.env = gym.make('Kangaroo-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(kangarooEnv)
class krullEnv(AtariEnv):
displayName = 'krull'
def __init__(self):
super().__init__()
self.env = gym.make('Krull-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(krullEnv)
class kung_fu_masterEnv(AtariEnv):
displayName = 'kung_fu_master'
def __init__(self):
super().__init__()
self.env = gym.make('KungFuMaster-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(kung_fu_masterEnv)
class montezuma_revengeEnv(AtariEnv):
displayName = 'montezuma_revenge'
def __init__(self):
super().__init__()
self.env = gym.make('MontezumaRevenge-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(montezuma_revengeEnv)
class ms_pacmanEnv(AtariEnv):
displayName = 'ms_pacman'
def __init__(self):
super().__init__()
self.env = gym.make('MsPacman-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(ms_pacmanEnv)
class name_this_gameEnv(AtariEnv):
displayName = 'name_this_game'
def __init__(self):
super().__init__()
self.env = gym.make('NameThisGame-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(name_this_gameEnv)
class phoenixEnv(AtariEnv):
displayName = 'phoenix'
def __init__(self):
super().__init__()
self.env = gym.make('Phoenix-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(phoenixEnv)
class pitfallEnv(AtariEnv):
displayName = 'pitfall'
def __init__(self):
super().__init__()
self.env = gym.make('Pitfall-v0')
self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(pitfallEnv)
class pongEnv(AtariEnv):
    """Pong wrapper whose action space is collapsed to two paddle moves."""
    displayName = 'pong'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Pong-v0')
        # Only two agent-visible actions are exposed, mapped in step().
        self.action_size = 2

    def step(self, action):
        # Translate the reduced action space {0, 1} to the ALE actions {2, 5}.
        return super().step(2 if action == 0 else 5)

    def sample_action(self):
        # Uniform draw over the reduced two-action space.
        return random.randrange(self.action_size)
AtariEnv.subEnvs.append(pongEnv)
class pooyanEnv(AtariEnv):
    """Pooyan wrapper, following the standard registration pattern."""
    displayName = 'pooyan'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Pooyan-v0')
        self.action_size = self.env.action_space.n
        # BUG FIX: removed a stray no-op literal `0` statement that sat at the
        # end of this constructor in the original source.
AtariEnv.subEnvs.append(pooyanEnv)
# Auto-registration boilerplate: each subclass wraps one Atari game. It sets the
# UI label in `displayName`, builds the Gym environment in __init__, reads the
# game's action count from the env's action space, and appends the class itself
# to the AtariEnv.subEnvs registry.
class private_eyeEnv(AtariEnv):
    displayName = 'private_eye'

    def __init__(self):
        super().__init__()
        self.env = gym.make('PrivateEye-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(private_eyeEnv)

class qbertEnv(AtariEnv):
    displayName = 'qbert'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Qbert-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(qbertEnv)

class riverraidEnv(AtariEnv):
    displayName = 'riverraid'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Riverraid-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(riverraidEnv)

class road_runnerEnv(AtariEnv):
    displayName = 'road_runner'

    def __init__(self):
        super().__init__()
        self.env = gym.make('RoadRunner-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(road_runnerEnv)

class robotankEnv(AtariEnv):
    displayName = 'robotank'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Robotank-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(robotankEnv)

class seaquestEnv(AtariEnv):
    displayName = 'seaquest'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Seaquest-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(seaquestEnv)

class skiingEnv(AtariEnv):
    displayName = 'skiing'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Skiing-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(skiingEnv)

class solarisEnv(AtariEnv):
    displayName = 'solaris'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Solaris-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(solarisEnv)

class space_invadersEnv(AtariEnv):
    displayName = 'space_invaders'

    def __init__(self):
        super().__init__()
        self.env = gym.make('SpaceInvaders-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(space_invadersEnv)

class star_gunnerEnv(AtariEnv):
    displayName = 'star_gunner'

    def __init__(self):
        super().__init__()
        self.env = gym.make('StarGunner-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(star_gunnerEnv)

class tennisEnv(AtariEnv):
    displayName = 'tennis'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Tennis-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(tennisEnv)

class time_pilotEnv(AtariEnv):
    displayName = 'time_pilot'

    def __init__(self):
        super().__init__()
        self.env = gym.make('TimePilot-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(time_pilotEnv)

class tutankhamEnv(AtariEnv):
    displayName = 'tutankham'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Tutankham-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(tutankhamEnv)

class up_n_downEnv(AtariEnv):
    displayName = 'up_n_down'

    def __init__(self):
        super().__init__()
        self.env = gym.make('UpNDown-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(up_n_downEnv)

class ventureEnv(AtariEnv):
    displayName = 'venture'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Venture-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(ventureEnv)

class video_pinballEnv(AtariEnv):
    displayName = 'video_pinball'

    def __init__(self):
        super().__init__()
        self.env = gym.make('VideoPinball-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(video_pinballEnv)

class wizard_of_worEnv(AtariEnv):
    displayName = 'wizard_of_wor'

    def __init__(self):
        super().__init__()
        self.env = gym.make('WizardOfWor-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(wizard_of_worEnv)

class yars_revengeEnv(AtariEnv):
    displayName = 'yars_revenge'

    def __init__(self):
        super().__init__()
        self.env = gym.make('YarsRevenge-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(yars_revengeEnv)

class zaxxonEnv(AtariEnv):
    displayName = 'zaxxon'

    def __init__(self):
        super().__init__()
        self.env = gym.make('Zaxxon-v0')
        self.action_size = self.env.action_space.n
AtariEnv.subEnvs.append(zaxxonEnv)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,582 | RobertCordingly/easyRL-v0 | refs/heads/master | /Environments/classicControlEnv.py | from Environments import environment
class ClassicControlEnv(environment.Environment):
    """Shared base wrapper for Gym classic-control environments.

    Concrete subclasses are expected to assign ``self.env`` to a Gym env.
    """
    displayName = 'Classic Control'

    def __init__(self):
        # All episode state starts unset until reset() is called.
        self.env = None
        self.state = None
        self.done = None
        self.total_rewards = None

    def step(self, action):
        """Advance one timestep, record the new observation, return the reward."""
        self.state, reward, self.done, _ = self.env.step(action)
        return reward

    def reset(self):
        """Begin a fresh episode: new initial state, cleared flags/totals."""
        self.state = self.env.reset()
        self.done = False
        self.total_rewards = 0

    def sample_action(self):
        """Draw a random action from the wrapped env's action space."""
        return self.env.action_space.sample()
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,583 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/ddpg.py | import joblib
import tensorflow as tf
import numpy as np
# from tensorflow.python.keras.optimizer_v2.adam import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Flatten, multiply
from Agents import modelFreeAgent
from Agents.Collections import ExperienceReplay
from Agents.Collections.TransitionFrame import TransitionFrame
tf.keras.backend.set_floatx('float64')
class DDPG(modelFreeAgent.ModelFreeAgent):
    """Deep Deterministic Policy Gradient agent.

    Keeps an actor network (state -> action scores), a critic network
    (state + one-hot action mask -> value), and slowly-tracking target
    copies of both, soft-updated at rate ``tau`` every
    ``target_update_interval`` steps.
    """
    displayName = 'DDPG'
    newParameters = [modelFreeAgent.ModelFreeAgent.Parameter('Batch Size', 1, 256, 1, 32, True, True,
                                                             "The number of transitions to consider simultaneously when updating the agent"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Memory Size', 1, 655360, 1, 1000, True, True,
                                                             "The maximum number of timestep transitions to keep stored"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Target Update Interval', 1, 100000, 1, 200, True, True,
                                                             "The distance in timesteps between target model updates"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Tau', 0.00, 1.00, 0.001, 0.97, True, True,
                                                             "The rate at which target models update")]
    parameters = modelFreeAgent.ModelFreeAgent.parameters + newParameters

    def __init__(self, *args):
        # The trailing args are this agent's own hyper-parameters; the rest
        # belong to the base class.
        paramLen = len(DDPG.newParameters)
        super().__init__(*args[:-paramLen])
        # BUG FIX: the original cast every hyper-parameter with int(), which
        # truncated tau (a fraction in [0, 1], default 0.97) to 0 and thereby
        # froze the target networks. Tau must stay a float.
        self.batch_size, self.memory_size, self.target_update_interval = (
            int(arg) for arg in args[-paramLen:-1])
        self.tau = float(args[-1])
        empty_state = self.get_empty_state()
        self.memory = ExperienceReplay.ReplayBuffer(self, self.memory_size, TransitionFrame(empty_state, -1, 0, empty_state, False))
        # Learning rates for the critic and actor optimizers.
        critic_lr = 0.002
        actor_lr = 0.001
        self.critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
        self.actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
        self.actor_model = self.get_actor()
        self.critic_model = self.get_critic()
        self.target_actor = self.get_actor()
        self.target_critic = self.get_critic()
        # Start the targets in sync with the online networks.
        self.target_actor.set_weights(self.actor_model.get_weights())
        self.target_critic.set_weights(self.critic_model.get_weights())
        self.total_steps = 0
        # All-ones action masks used when querying the critic over every action.
        self.allMask = np.full((1, self.action_size), 1)
        self.allBatchMask = np.full((self.batch_size, self.action_size), 1)

    def get_actor(self):
        """Build the actor: flatten -> two 24-unit ReLU layers -> linear head."""
        # Initialize the output layer's weights between -3e-3 and 3e-3.
        last_in = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
        input_shape = self.state_size
        inputA = Input(input_shape)
        x = Flatten()(inputA)
        x = Dense(24, activation='relu')(x)  # fully connected
        x = Dense(24, activation='relu')(x)
        x = Dense(self.action_size, activation='linear', kernel_initializer=last_in)(x)
        model = Model(inputs=inputA, outputs=x)
        model.compile(loss='mse', optimizer=self.actor_optimizer)
        return model

    def get_critic(self):
        """Build the critic: state branch -> scalar value, masked per-action."""
        input_shape = self.state_size
        inputA = Input(shape=input_shape)
        # inputB is a one-hot (or all-ones) action mask.
        inputB = Input(shape=(self.action_size,))
        x = Flatten()(inputA)
        x = Dense(24, input_dim=self.state_size, activation='relu')(x)  # fully connected
        x = Dense(24, activation='relu')(x)
        x = Dense(1, activation='linear')(x)
        # Broadcast the scalar value across the masked action slots.
        outputs = multiply([x, inputB])
        model = Model(inputs=[inputA, inputB], outputs=outputs)
        model.compile(loss='mse', optimizer=self.critic_optimizer)
        return model

    def ou_noise(self, a, p=0.15, mu=0, differential=1e-1, sigma=0.2, dim=1):
        """One Euler step of an Ornstein-Uhlenbeck process for exploration noise."""
        return a + p * (mu - a) * differential + sigma * np.sqrt(differential) * np.random.normal(size=dim)

    def choose_action(self, state):
        """Return an exploratory action: actor output plus OU noise, clipped."""
        bg_noise = np.zeros(self.action_size)
        bg_noise = self.ou_noise(bg_noise, dim=self.action_size)
        u = self.predict(state, False)
        sampled_actions = tf.squeeze(u)
        sampled_actions = sampled_actions.numpy() + bg_noise
        # Clip the noisy action into the bounds [-0.3, 0.3].
        legal_action = np.clip(sampled_actions, -0.3, 0.3)[0]
        legal_action = np.squeeze(legal_action)
        # NOTE(review): the clipped float is truncated to an int action index —
        # confirm this discretization is intended for the target env.
        action_returned = legal_action.astype(int)
        return action_returned

    def sample(self):
        """Draw a mini-batch of transitions from replay memory."""
        return self.memory.sample(self.batch_size)

    def addToMemory(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.append_frame(TransitionFrame(state, action, reward, new_state, done))

    def remember(self, state, action, reward, new_state, done=False):
        """Record a transition and, once memory is warm, run one training step.

        Returns 0 until the buffer holds 2 * batch_size transitions, then the
        critic loss for this update.
        """
        self.addToMemory(state, action, reward, new_state, done)
        loss = 0
        if len(self.memory) < 2 * self.batch_size:
            return loss
        _, mini_batch = self.sample()
        X_train, Y_train, states = self.learn(mini_batch)
        # Train critic on the TD targets built by learn().
        with tf.GradientTape() as tape:
            # NOTE(review): X_train is already a [states, masks] list; wrapping
            # it in another list relies on Keras flattening nested inputs.
            critic_value = self.critic_model([X_train])
            critic_loss = tf.math.reduce_mean(tf.math.square(Y_train - critic_value))
        critic_grad = tape.gradient(critic_loss, self.critic_model.trainable_variables)
        self.critic_optimizer.apply_gradients(zip(critic_grad, self.critic_model.trainable_variables))
        self.allBatchMask = self.allBatchMask.astype(float)
        actions = self.predict(states, False)
        actions = tf.convert_to_tensor(actions)
        o = self.critic_grads(states, self.allBatchMask)
        # Computing gradients using critic value
        critic_value = self.critic_model([states, self.allBatchMask], training=True)
        critic_value = tf.squeeze(critic_value)
        # NOTE(review): critic_value above was computed outside this tape, so
        # this gradient is None and unused — the actor update below is not a
        # true deterministic policy gradient. Preserved as-is pending rework.
        with tf.GradientTape() as tape:
            grad = tape.gradient(critic_value, actions)
        with tf.GradientTape() as tape:
            actor_grad = tape.gradient(self.predict(states, False), self.actor_model.trainable_variables)
            self.actor_optimizer.apply_gradients(zip(actor_grad, self.actor_model.trainable_variables))
        self.updateTarget()
        return critic_loss.numpy()

    def updateTarget(self):
        """Soft-update target nets every ``target_update_interval`` steps.

        Each target weight moves toward its online counterpart:
        w_target = tau * w_online + (1 - tau) * w_target.
        """
        if self.total_steps >= 2 * self.batch_size and self.total_steps % self.target_update_interval == 0:
            actor_weights = self.actor_model.get_weights()
            t_actor_weights = self.target_actor.get_weights()
            critic_weights = self.critic_model.get_weights()
            t_critic_weights = self.target_critic.get_weights()
            for i in range(len(actor_weights)):
                t_actor_weights[i] = self.tau * actor_weights[i] + (1 - self.tau) * t_actor_weights[i]
            for i in range(len(critic_weights)):
                t_critic_weights[i] = self.tau * critic_weights[i] + (1 - self.tau) * t_critic_weights[i]
            self.target_actor.set_weights(t_actor_weights)
            self.target_critic.set_weights(t_critic_weights)
            print("targets updated")
        # BUG FIX: the original repeated the soft update after this guard by
        # assigning into the temporary lists returned by get_weights() — a
        # no-op on the models — and incremented total_steps twice per call.
        self.total_steps += 1

    def predict(self, state, isTarget):
        """Run the (target) actor on ``state``; returns the raw network output."""
        shape = (-1,) + self.state_size
        state = np.reshape(state, shape)
        if isTarget:
            result = self.target_actor([state])
        else:
            result = self.actor_model([state])
        return result

    def create_one_hot(self, vector_length, hot_index):
        """Return a one-hot vector; hot_index == -1 yields an all-zero vector."""
        output = np.zeros(vector_length)
        if hot_index != -1:
            output[hot_index] = 1
        return output

    def learn(self, mini_batch):
        """Assemble training arrays and TD targets for a sampled mini-batch.

        Returns (X_train, Y_train, states) where X_train is a
        [states, one-hot actions] pair and Y_train holds the bootstrapped
        targets reward + gamma * max_a Q_target(s', a) (or just reward at
        terminal transitions).
        """
        X_train = [np.zeros((self.batch_size,) + self.state_size), np.zeros((self.batch_size,) + (self.action_size,))]
        states = (np.zeros((self.batch_size,) + self.state_size))
        next_states = (np.zeros((self.batch_size,) + self.state_size))
        for index_rep, transition in enumerate(mini_batch):
            X_train[0][index_rep] = transition.state
            states[index_rep] = transition.state
            X_train[1][index_rep] = self.create_one_hot(self.action_size, transition.action)
            next_states[index_rep] = transition.next_state
        Y_train = np.zeros((self.batch_size,) + (self.action_size,))
        self.allBatchMask = self.allBatchMask.astype(float)
        qnext = self.target_critic([next_states, self.allBatchMask])
        qnext = np.amax(qnext, 1)
        for index_rep, transition in enumerate(mini_batch):
            if transition.is_done:
                Y_train[index_rep][transition.action] = transition.reward
            else:
                Y_train[index_rep][transition.action] = transition.reward + qnext[index_rep] * self.gamma
        # (Removed two large blocks of commented-out experimental gradient code.)
        return X_train, Y_train, states

    def critic_grads(self, states, actions):
        """Gradient of the critic's output with respect to the action input."""
        actions = tf.convert_to_tensor(actions)
        with tf.GradientTape() as tape:
            tape.watch(actions)
            critic_value = self.critic_model([states, actions])
            critic_value = tf.squeeze(critic_value)
        return tape.gradient(critic_value, actions)

    def save(self, filename):
        """Persist actor/critic weights (tagged with the agent name) to disk."""
        act_mem = self.actor_model.get_weights()
        crit_mem = self.critic_model.get_weights()
        joblib.dump((DDPG.displayName, act_mem, crit_mem), filename)

    def load(self, filename):
        """Restore weights saved by save(); refuses files from other agents."""
        name, act_wt, crit_wt = joblib.load(filename)
        if name != DDPG.displayName:
            print('load failed')
        else:
            self.actor_model.set_weights(act_wt)
            self.critic_model.set_weights(crit_wt)

    def memsave(self):
        """Snapshot the online networks' weights as an in-memory tuple."""
        actor_weights = self.actor_model.get_weights()
        critic_weights = self.critic_model.get_weights()
        return (actor_weights, critic_weights)

    def memload(self, mem):
        """Restore weights from a memsave() snapshot."""
        act_wt, crit_wt = mem
        self.actor_model.set_weights(act_wt)
        self.critic_model.set_weights(crit_wt)

    def reset(self):
        # No per-episode state to clear for this agent.
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,584 | RobertCordingly/easyRL-v0 | refs/heads/master | /webpage/easyRL_app/apps.py | from django.apps import AppConfig
# Task identifiers exchanged with the backend job runner.
TASK_CREATE_INSTANCE = "createInstance"
TASK_EXPORT_MODEL = "exportModel"
TASK_TERMINAL_INSTANCE = "terminateInstance"
TASK_RUN_JOB = "runJob"
TASK_RUN_TEST = "runTest"
TASK_POLL = "poll"
TASK_IS_JOB_RUNNING = "isRunning"
TASK_HALT_JOB = "haltJob"
TASK_INFO = "info"
TASK_IMPORT = "import"

# Application error codes.
ERROR_NONE = 0
ERROR_UNAUTHENTICATED = 1

# ANSI terminal color/formatting escape codes.
# https://stackoverflow.com/questions/287871/how-to-print-colored-text-to-the-terminal
FORMAT_HEADER = '\033[95m'
FORMAT_RED = "\033[1;31m"
FORMAT_BLUE = "\033[1;34m"
FORMAT_CYAN = "\033[1;36m"
FORMAT_GREEN = "\033[0;32m"
FORMAT_WARNING = '\033[93m'
FORMAT_FAIL = '\033[91m'
FORMAT_RESET = "\033[0;0m"
FORMAT_BOLD = "\033[;1m"
FORMAT_UNDERLINE = '\033[4m'

# File-name templates for training artifacts.
IMAGE_FILE = "Training-episode-{}.gif"
DATA_JSON_FILE = "data.json"
LOCAL_JSON_FILE = "/tmp/{}.json"
class EasyrlAppConfig(AppConfig):
    """Django application configuration for the ``easyRL_app`` app."""
    name = 'easyRL_app'
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,585 | RobertCordingly/easyRL-v0 | refs/heads/master | /Environments/cartPoleEnvDiscrete.py | from Environments import cartPoleEnv
from PIL import Image, ImageDraw
import math
class CartPoleEnvDiscrete(cartPoleEnv.CartPoleEnv):
    """Cart-pole variant whose continuous observation is discretized into bins."""
    displayName = 'Cart Pole Discrete'

    def __init__(self):
        super().__init__()
        # Observations become 4-tuples of bin indices.
        self.state_size = (4,)
        # Bin counts: n_bins for position/velocity/angle-rate, n_bins_angle for the angle.
        self.n_bins = 3
        self.n_bins_angle = 12
        # Value ranges used by to_bin() for each observation component.
        self.cart_position_range = (-2.4, 2.4)
        self.pole_angle_range = (-2, 2)
        self.cart_velocity_range = (-1, 1)
        self.angle_rate_range = (-3.5, 3.5)

    def step(self, action):
        """Step the wrapped env, then replace the raw state with its binned form."""
        reward = super().step(action)
        self.state = self.build_state(self.state)
        return reward

    def render(self):
        """Draw the cart/pole scene with PIL and return it as an RGB image."""
        if self.env.state is None: return None
        screen_width = 600
        screen_height = 400
        state = self.env.state
        # Map world x-coordinates onto screen pixels.
        world_width = self.env.x_threshold * 2
        scale = screen_width / world_width
        cartx = state[0] * scale + screen_width / 2.0
        carty = 100  # TOP OF CART
        polewidth = 10.0
        polelen = scale * (2 * self.env.length)
        cartwidth = 50.0
        cartheight = 30.0
        image = Image.new('RGB', (screen_width, screen_height), 'white')
        draw = ImageDraw.Draw(image)
        # Cart rectangle, centered on (cartx, carty).
        l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
        axleoffset = cartheight / 4.0
        cartPoints = [(cartx + l, carty + b), (cartx + l, carty + t), (cartx + r, carty + t), (cartx + r, carty + b)]
        draw.polygon(cartPoints, fill='black')
        # Pole rectangle, rotated about the axle by the pole angle state[2].
        l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
        t, b = t + axleoffset, b + axleoffset
        l, r, t, b = cartx + l, cartx + r, carty + t, carty + b
        polePoints = [(l, b), (l, t), (r, t), (r, b)]
        for i, (x, y) in enumerate(polePoints):
            # Rotate each corner about the axle point.
            x -= cartx
            y -= carty + axleoffset
            x, y = x * math.cos(state[2]) + y * math.sin(state[2]), -x * math.sin(state[2]) + y * math.cos(state[2])
            x += cartx
            y += carty + axleoffset
            polePoints[i] = x, y
        draw.polygon(polePoints, fill=(204, 153, 102))
        # Axle dot. NOTE(review): the blue channel 284 exceeds the RGB maximum
        # of 255 — likely meant 254; confirm intended color.
        draw.chord([cartx - polewidth / 2, carty + axleoffset - polewidth / 2, cartx + polewidth / 2,
                    carty + axleoffset + polewidth / 2], 0, 360, fill=(127, 127, 284))
        draw.line([(0, carty), (screen_width, carty)], fill='black')
        # The drawing uses a y-up convention; flip for image coordinates.
        return image.transpose(method=Image.FLIP_TOP_BOTTOM)

    def reset(self):
        """Reset the wrapped env and store the binned initial state."""
        super().reset()
        self.state = self.build_state(self.state)

    def to_bin(self, value, range, bins):
        """Map ``value`` within ``range`` to a bin index in [0, bins - 1].

        NOTE: the parameters shadow the builtins ``range`` and ``bin``; do not
        rely on those builtins inside this method.
        """
        bin = int((value-range[0]) // ((range[1] - range[0]) / bins))
        # Clamp out-of-range values to the edge bins.
        bin = max(min(bin, bins-1), 0)
        return bin

    def build_state(self, state):
        """Convert a raw 4-component observation into a tuple of bin indices."""
        cart_position, cart_velocity, pole_angle, angle_rate_of_change = state
        new_cart_position = self.to_bin(cart_position, self.cart_position_range, self.n_bins)
        new_cart_velocity = self.to_bin(cart_velocity, self.cart_velocity_range, self.n_bins)
        new_pole_angle = self.to_bin(pole_angle, self.pole_angle_range, self.n_bins_angle)
        new_angle_rate_of_change = self.to_bin(angle_rate_of_change, self.angle_rate_range, self.n_bins)
        state = new_cart_position, new_cart_velocity, new_pole_angle, new_angle_rate_of_change
        return state
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,586 | RobertCordingly/easyRL-v0 | refs/heads/master | /Custom Environments/drugDosing/drugDosingEnv.py | import pickle
from copy import deepcopy
from random import randint, uniform, random, seed
from interval import Interval
from Environments.environment import Environment
# Lookup table mapping an error value to a discrete state id (cases 1 and 2).
# NOTE(review): Interval(0.025, 0.01) is inverted (upper < lower) and the
# neighbouring entries overlap — possibly meant Interval(0.025, 0.05); confirm
# against the source paper before relying on states 4-5.
STATE12 = {Interval(-999, 0.0063): 1,
           Interval(0.0063, 0.0125): 2,
           Interval(0.0125, 0.025): 3,
           Interval(0.025, 0.01): 4,
           Interval(0.01, 0.05): 5,
           Interval(0.05, 0.1): 6,
           Interval(0.1, 0.2): 7,
           Interval(0.2, 0.25): 8,
           Interval(0.25, 0.3): 9,
           Interval(0.3, 0.35): 10,
           Interval(0.35, 0.4): 11,
           Interval(0.4, 0.45): 12,
           Interval(0.45, 0.5): 13,
           Interval(0.5, 0.55): 14,
           Interval(0.55, 0.6): 15,
           Interval(0.6, 0.65): 16,
           Interval(0.65, 0.7): 17,
           Interval(0.7, 0.8): 18,
           Interval(0.8, 0.9): 19,
           Interval(0.9, 999): 20}

# Lookup table mapping an error value to a discrete state id (case 3).
STATE3 = {Interval(-999, 0.03): 1,
          Interval(0.03, 0.1): 2,
          Interval(0.1, 0.2): 3,
          Interval(0.2, 0.3): 4,
          Interval(0.3, 0.4): 5,
          Interval(0.4, 0.5): 6,
          Interval(0.5, 0.6): 7,
          Interval(0.6, 0.7): 8,
          Interval(0.7, 0.8): 9,
          Interval(0.8, 0.9): 10,
          Interval(0.9, 1.): 11,
          Interval(1., 1.2): 12,
          Interval(1.2, 1.4): 13,
          Interval(1.4, 1.6): 14,
          Interval(1.6, 1.8): 15,
          Interval(1.8, 2.): 16,
          Interval(2., 2.2): 17,
          Interval(2.2, 2.5): 18,
          Interval(2.5, 3.): 19,
          Interval(3., 999.): 20}

# Discrete dose fractions; scaled by U_MAX_N / U_MAX_P in Patient.update().
ACTION_N = (0., 0.01, 0.02, 0.03, 0.04, 0.06, 0.08, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.)
ACTION_P = \
    (0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.78, 0.8, 0.82, 0.85, 0.87, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.97, 0.98, 1.)
GOAL_N = 0.
GOAL_P = 0.15
# Maximum dose magnitudes for the N (normal) and P (pregnant) regimes.
U_MAX_N = 10.
U_MAX_P = 0.5
# x1=N,x2=T,x3=I,x4=C
# goal: x2=0 (& x1=1 in case3)
# Default model coefficients for the tumor-growth dynamics in Patient.update().
A1, A2, A3 = 0.2, 0.3, 0.1
B1, B2 = 1., 1.
C1, C2, C3, C4 = 1., 0.5, 1., 1.
D1, D2 = 0.2, 1.
R1, R2 = 1.5, 1.
S = 0.33
ALPHA = 0.3
RHO = 0.01
BETA = 0.8
# Default setting (1: young patient, 2: young pregnant woman, 3: elderly patient)
CASE = 1
MAX_ITER = 200
# Only case=1 is mentioned in the paper
# Also lower bound is added to avoid bizarre result (not mentioned in paper)
class Patient:
    """Discrete-time tumour-growth / chemotherapy model for one patient.

    State variables:
        x1 -- normal-cell population (N)
        x2 -- tumour-cell population (T)
        x3 -- immune-cell population (I)
        x4 -- drug concentration (C)

    Cases: 1 = young patient, 2 = young pregnant woman, 3 = elderly patient.
    """

    def __init__(self, default=True, case=1, is_first_stage=False, x1=None, x2=None, x3=None, x4=None, t=1, s=None):
        """Initialise model coefficients and state.

        :param default: use the paper's fixed coefficients if True, otherwise
            sample per-patient coefficients uniformly from plausible ranges
        :param case: patient type (1, 2 or 3)
        :param is_first_stage: for case 2, whether the pregnancy is in its
            first stage (selects the reduced-dose action set)
        :param x1..x4: optional explicit initial state values
        :param t: starting time step
        :param s: optional RNG seed for reproducible coefficient sampling
        """
        if s:
            seed(s)
        self.case = case
        self.is_first_stage = is_first_stage
        self.t = t
        # Coefficients: fixed defaults from the paper or randomised patient.
        self.A2 = A2 if default else uniform(.25, .5)
        self.A1 = A1 if default else uniform(0.1, self.A2)
        self.A3 = A3 if default else uniform(0.1, self.A1)
        self.B2 = B2 if default else 1.
        self.B1 = B1 if default else uniform(1., 1.5)
        self.C1 = C1 if default else uniform(.3, 1.)
        self.C2 = C2 if default else uniform(.3, 1.)
        self.C3 = C3 if default else uniform(.3, 1.)
        self.C4 = C4 if default else uniform(.3, 1.)
        self.D1 = D1 if default else uniform(.15, .3)
        self.D2 = D2 if default else 1.
        self.R1 = R1 if default else uniform(1.2, 1.6)
        self.R2 = R2 if default else 1.
        self.S = S if default else uniform(.3, .5)
        self.ALPHA = ALPHA if default else uniform(.3, .5)
        self.RHO = RHO if default else uniform(.01, .05)
        # Initial state (note: the module-level R2 is used here, not self.R2).
        self.x1 = x1 if x1 is not None else R2 if default else 0.6
        self.x2 = x2 if x2 is not None else 1. - self.x1
        self.x3 = x3 if x3 is not None else uniform(.1, .2)
        self.x4 = x4 if x4 is not None else 0.
        # Initial tumour size, used for percentage reporting in _get_T.
        self.x20 = self.x2
        self.s = self._get_state(self._get_error())

    def update(self, action):
        """Advance the model one time step under the given dosing action.

        :param action: index into ACTION_N / ACTION_P selecting the dose
        :return: (state, reward, done, cured, dead, N_percent, T_percent)
        """
        ek = self._get_error()
        # Euler-style increments computed from the *current* state, then
        # applied simultaneously below — order matters.
        x1_u = self.R2 * self.x1 * (1 - self.B2 * self.x1) - self.C4 * self.x1 * self.x2 - self.A3 * self.x1 * self.x4
        x2_u = self.R1 * self.x2 * (
                1 - self.B1 * self.x2) - self.C2 * self.x3 * self.x2 - self.C3 * self.x2 * self.x1 - self.A2 * self.x2 * self.x4
        x3_u = self.S + (self.RHO * self.x3 * self.x2) / (
                self.ALPHA + self.x2) - self.C1 * self.x3 * self.x2 - self.D1 * self.x3 - self.A1 * self.x3 * self.x4
        # Drug input: full-dose action set for cases 1/3 and late case 2,
        # reduced-dose set for a first-stage pregnancy.
        if self.case in (1, 3) or (self.case == 2 and not self.is_first_stage):
            x4_u = -self.D2 * self.x4 + U_MAX_N * ACTION_N[action]
        else:
            x4_u = -self.D2 * self.x4 + U_MAX_P * ACTION_P[action]
        # Populations and concentration are clamped at zero (lower bound
        # added to avoid bizarre results; not in the paper).
        self.x1 = max(self.x1 + x1_u, 0.)
        self.x2 = max(self.x2 + x2_u, 0.)
        self.x3 = max(self.x3 + x3_u, 0.)
        self.x4 = max(self.x4 + x4_u, 0.)
        ek_new = self._get_error()
        self.s = self._get_state(ek_new)
        self.t = self.t + 1
        reward = self._get_reward(ek, ek_new)
        # Episode ends on tumour eradication, patient death, or time-out.
        done = self.x2 <= 1e-3 or self.x1 <= 1e-3 or self.t > MAX_ITER
        cured = self.x2 <= 1e-3 and self.x1 >= 0.999
        dead = self.x1 <= 1e-3
        return self.s, reward, done, cured, dead, self._get_N(), self._get_T()

    def _get_error(self):
        """Tracking error: tumour size for cases 1/2, a BETA-weighted mix of
        tumour size and normal-cell deficit for case 3. Never negative."""
        return max(0., self.x2 - 0. if self.case in (1, 2) else BETA * self.x2 + (1. - BETA) * (1. - self.x1))

    def _get_state(self, ek):
        """Map a continuous error to one of 20 discrete states.

        Returns None only if no interval matches (the tables span -999..999,
        but STATE12 has a suspicious gap around 0.025-0.05 — see the table).
        """
        if self.case in (1, 2):
            for i in STATE12:
                if ek in i:
                    return STATE12[i]
        else:
            for i in STATE3:
                if ek in i:
                    return STATE3[i]

    @staticmethod
    def _get_reward(ek, ek_new):
        """Relative error reduction in [0, 1); zero when the error grew."""
        return float(ek - ek_new) / (ek + 1e-5) if ek_new < ek else 0.

    def _get_N(self):
        """Normal-cell deficit from 1.0, as a percentage."""
        return abs(self.x1 - 1) * 100

    def _get_T(self):
        """Tumour size relative to its initial value, as a percentage."""
        return self.x2 / (self.x20 + 1e-5) * 100
class DrugDosingEnv(Environment):
    """Drug-dosing (chemotherapy) environment driven by the Patient model.

    Episodes start from states sampled out of pre-generated random-policy
    trajectories (see generate/generate_all) so training covers the state
    space.
    """
    displayName = 'DrugDosing'

    def __init__(self, case, is_first_stage):
        super().__init__()
        self.case = case
        self.is_first_stage = is_first_stage
        self.data = self.load_data()
        self.patient = None
        # Optional hooks used only for periodic evaluation (see set_agent).
        self.agent = None
        self.patients_test = None

    def step(self, action):
        """Apply a dosing action; return (state, reward, done)."""
        state, reward, done, _, _, _, _ = self.patient.update(action)
        return state, reward, done

    def reset(self):
        """Start an episode from a random point of a random stored trajectory."""
        trajectory = self.data[randint(0, len(self.data) - 1)]
        # Exclude the final entry so at least one step remains.
        data = trajectory[randint(0, len(trajectory) - 2)]
        self.patient = Patient(case=self.case, x1=data[0], x2=data[1], x3=data[2], x4=data[3], t=data[4])
        return self.patient.s

    def load_data(self):
        """Load pre-generated trajectories for the current patient case."""
        if self.case in (1, 3) or (self.case == 2 and not self.is_first_stage):
            path = './Custom Environments/drugDosing/drugDosing_N'
        else:
            path = './Custom Environments/drugDosing/drugDosing_P'
        # with-block closes the file handle (original leaked it).
        with open(path, 'rb') as f:
            return pickle.load(f)

    def set_agent(self, agent):
        """Attach an agent; for case 1 also build a fixed 15-patient test cohort."""
        self.agent = agent
        if self.case == 1:
            self.patients_test = [Patient(default=False, case=1) for _ in range(15)]

    # Advanced feature, only available when importing agent
    def episode_finish(self, episode):
        """Decay the agent's learning rate every 500 episodes and print test
        metrics every 1000 episodes (case 1 only)."""
        if self.agent:
            if episode and not episode % 500:
                self.agent.alpha *= 0.8
            if episode and not episode % 1000 and self.case == 1:
                self.print_NT()

    def _print_stats(self, prefix, name, values):
        """Print avg/max/min of `values`, labelled `prefix` + `name`."""
        print(prefix + name + '_avg:' + str(sum(values) / len(values)))
        print(prefix + name + '_max:' + str(max(values)))
        print(prefix + name + '_min:' + str(min(values)))

    def print_NT(self):
        """Evaluate the attached agent on the fixed cohort and print
        normal-cell (N) and tumour (T) statistics at 1, 4 and 7 weeks."""
        Ns, N1, N4, N7 = [], [], [], []
        Ts, T1, T4, T7 = [], [], [], []
        for p in self.patients_test:
            t = 0
            p = deepcopy(p)  # never mutate the stored cohort
            T_added, N_added, p_cured, p_dead = False, False, False, False
            while t < MAX_ITER:
                t += 1
                q = self.agent.choose_action(p.s)
                s, reward, done, cured, dead, N, T = p.update(q)
                # Snapshot metrics at the 1/4/7-week marks.
                if t == 7:
                    N1.append(N)
                    T1.append(T)
                elif t == 28:
                    N4.append(N)
                    T4.append(T)
                elif t == 49:
                    N7.append(N)
                    T7.append(T)
                # Record the first day each metric reaches its target.
                if N <= 1e-5 and not N_added:
                    Ns.append(t)
                    N_added = True
                if T <= 1e-5 and not T_added:
                    Ts.append(t)
                    T_added = True
                if dead:
                    p_dead = True
                    print('dead!')
                    break
                if cured:
                    # Back-fill week snapshots skipped by an early cure.
                    if t < 7:
                        N1.append(0)
                        T1.append(0)
                    if t < 28:
                        N4.append(0)
                        T4.append(0)  # BUGFIX: was N4.append(0) twice, leaving T4 short
                    if t < 49:
                        N7.append(0)
                        T7.append(0)
                    p_cured = True
                    print('cured!')
                    break
            if not p_cured and not p_dead:
                print('survived!')  # typo fix: was 'survied!'
        if len(Ns) + len(Ts) != 30:
            # Not every test patient reached both targets; skip the summary.
            print('')
            return
        self._print_stats('Number of days to achieve the target value: ', 'N', Ns)
        self._print_stats('Number of days to achieve the target value: ', 'T', Ts)
        self._print_stats('Percent value; after 1 week of chemotherapy: ', 'N', N1)
        self._print_stats('Percent value; after 1 week of chemotherapy: ', 'T', T1)
        self._print_stats('Percent value; after 4 week of chemotherapy: ', 'N', N4)
        self._print_stats('Percent value; after 4 week of chemotherapy: ', 'T', T4)
        self._print_stats('Percent value; after 7 week of chemotherapy: ', 'N', N7)
        self._print_stats('Percent value; after 7 week of chemotherapy: ', 'T', T7)
class CustomEnv(Environment):
    """Adapter exposing DrugDosingEnv through the easyRL Environment interface."""
    displayName = 'Custom_DrugDosing'

    def __init__(self, case=CASE, is_first_stage=False):
        super().__init__()
        self.env = DrugDosingEnv(case, is_first_stage)
        self.state = self.env.reset()
        self.done = False
        # 20 discrete error states, 20 discrete dosing actions.
        self.state_size = (20,)
        self.action_size = 20

    def step(self, action):
        """Forward one action to the wrapped env; cache state/done, return reward."""
        next_state, reward, finished = self.env.step(action)
        self.state = next_state
        self.done = finished
        return reward

    def reset(self):
        """Begin a fresh episode in the wrapped environment."""
        self.state = self.env.reset()
        self.done = False

    def sample_action(self):
        """Uniformly random action index."""
        return randint(0, self.action_size - 1)

    def render(self):
        # No visualisation for this environment.
        pass

    def close(self):
        # Nothing to release.
        pass
def generate(num, case, is_first_stage):
    """Roll out `num` random-policy patient trajectories.

    Each trajectory is a list of [x1, x2, x3, x4, t] snapshots, starting from
    an assumed initial condition. For a non-first-stage case-2 pregnancy the
    simulation starts at day 91; a first-stage one stops before day 91.
    """
    late_pregnancy = case == 2 and not is_first_stage
    early_pregnancy = case == 2 and is_first_stage
    start_t = 91 if late_pregnancy else 1
    stop_t = 91 if early_pregnancy else MAX_ITER + 1
    scenarios = []
    for _ in range(num):
        # Assumed initial conditions: complementary N/T, small immune level.
        n0 = random()
        p = Patient(case=case, is_first_stage=is_first_stage,
                    x1=n0, x2=1 - n0, x3=uniform(.1, .2), x4=0., t=start_t)
        trajectory = [[p.x1, p.x2, p.x3, p.x4, p.t]]
        for _ in range(start_t + 1, stop_t):
            _, _, finished, _, _, _, _ = p.update(randint(0, 19))
            trajectory.append([p.x1, p.x2, p.x3, p.x4, p.t])
            if finished:
                break
        scenarios.append(trajectory)
    return scenarios
def generate_all(num):
    """Generate and pickle `num` initialization trajectories for both patient
    types (normal dosing and first-stage pregnancy dosing).

    Fix: the original passed an anonymous `open(...)` to pickle.dump, leaking
    the file handle; a with-block now guarantees the file is closed/flushed.
    """
    data_N = generate(num, case=1, is_first_stage=False)
    with open('./drugDosing_N', 'wb') as f:
        pickle.dump(data_N, f)
    print('Normal data generated')
    data_P = generate(num, case=2, is_first_stage=True)
    with open('./drugDosing_P', 'wb') as f:
        pickle.dump(data_P, f)
    print('Pregnant data generated')
# Uncomment next line to generate data for initialization
# generate_all(50000)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,587 | RobertCordingly/easyRL-v0 | refs/heads/master | /Environments/frozenLakeEnv.py | from Environments import environment
import gym
import sys
from gym import utils
class FrozenLakeEnv(environment.Environment):
    """easyRL wrapper around gym's discrete FrozenLake-v0 environment."""
    displayName = 'Frozen Lake'

    def __init__(self):
        self.env = gym.make('FrozenLake-v0')
        self.action_size = self.env.action_space.n
        # States are exposed as 1-tuples of the discrete observation.
        self.state_size = (1,)
        print(self.env.action_space, self.env.observation_space)
        print(self.action_size, self.state_size)
        self.state = None
        self.done = None
        self.total_rewards = None

    def step(self, action):
        """Take one gym step; cache the wrapped state and done flag."""
        observation, reward, self.done, info = self.env.step(action)
        self.state = (observation,)
        return reward

    def reset(self):
        """Reset the underlying gym env and the episode bookkeeping."""
        initial = self.env.reset()
        self.state = (initial,)
        self.done = False
        self.total_rewards = 0

    def sample_action(self):
        """Uniformly random action from gym's action space."""
        return self.env.action_space.sample()

    def render(self):
        # Rendering intentionally disabled.
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,588 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/Collections/ExperienceReplay.py | import numpy as np
import random
from Agents.Collections.TransitionFrame import TransitionFrame, ActionTransitionFrame
from collections import deque
from collections.abc import Iterable
from copy import deepcopy
class ReplayBuffer:
    """
    An Experience Replay Buffer for looking back and resampling transitions.

    Implemented as a fixed-capacity circular buffer: once max_length frames
    have been appended, new frames overwrite the oldest ones.
    """
    def __init__(self, learner, max_length, empty_trans, history_length: int = 1):
        """
        Constructor method
        :param learner: the agent using this buffer
        :type learner: Agent
        :param max_length: the max length of this buffer
        :type max_length: int
        :param empty_trans: the empty transition to pad results with
        :type empty_trans: TransitionFrame
        :param history_length: the length of the history
        :type history_length: int
        """
        # Next write position in the circular buffer.
        self._cur_idx = 0
        self.empty_trans = empty_trans
        self.history_length = history_length
        self.learner = learner
        self.max_length = max_length
        # Number of frames currently stored (<= max_length).
        self._size = 0
        self._transitions = np.empty(max_length, dtype = object)

    def __len__(self):
        """
        Returns the length of this replay buffer.
        :return: the length of the buffer
        :rtype: int
        """
        return self._size

    def append_frame(self, transition_frame):
        """
        Appends a given frame to the buffer, overwriting the oldest frame
        once the buffer is full.
        :param transition_frame: the transition frame to append to the end
        of this buffer
        :type transition_frame: TransitionFrame
        """
        # Add the transition to the buffer.
        self._transitions[self._cur_idx] = transition_frame
        # Increment the current index, wrapping around the capacity.
        self._cur_idx = (self._cur_idx + 1) % self.max_length
        # Increment the size if the size is less than the max_length.
        if (self._size < self.max_length):
            self._size += 1

    def get_recent_action(self):
        """
        Get the last n actions where n is equal to the history length of the
        buffer, padded at the front with -1 when fewer are available.
        :return: the recent actions
        :rtype: list
        """
        # Empty deque to prepend actions to.
        result = deque(maxlen = self.history_length)
        # Get the latest action until the history length or until the beginning
        # of the buffer is reached.
        for i in range((self._cur_idx - 1), ((self._cur_idx - 1) - self.history_length), -1):
            # A negative i in a non-full buffer means we ran off the start.
            if (i < 0 and not self.is_full()):
                break
            result.appendleft(self._transitions[i % self.max_length].action)
        # Prepend -1s until the length of the deque equals the history
        # length.
        while (len(result) < self.history_length):
            result.appendleft(-1)
        # Return the recent actions as a list.
        return list(result)

    def get_recent_state(self):
        """
        Get the last n states where n is equal to the history length of the
        buffer, padded at the front with the empty state when fewer are
        available.
        :return: the recent states
        :rtype: list
        """
        # Empty deque to prepend states to.
        result = deque(maxlen = self.history_length)
        # Get the latest states until the history length or until the beginning
        # of the buffer is reached.
        for i in range((self._cur_idx - 1), ((self._cur_idx - 1) - self.history_length), -1):
            # A negative i in a non-full buffer means we ran off the start.
            if (i < 0 and not self.is_full()):
                break
            result.appendleft(self._transitions[i % self.max_length].state)
        # Prepend empty states until the length of the deque equals the
        # history length.
        empty_state = self.empty_trans.state
        while (len(result) < self.history_length):
            result.appendleft(empty_state)
        # Return the recent states as a list.
        return list(result)

    def get_transitions(self, start):
        """
        Gets a list of transitions from the given index. The length of the
        list will be equal to the history length of the buffer; shorter runs
        (episode end or buffer head) are padded at the front.
        :param start: is the start index to get transitions from.
        :type start: int
        :return: the padded transitions
        :rtype: list
        """
        # Check if the index within bounds.
        if (start < 0 or start >= len(self)):
            raise ValueError("Start index is out of bounds.")
        # If the history length is equal to 1, just return the transition
        # at the given index.
        if (self.history_length == 1):
            return self._transitions[start]
        # Empty list to store the transitions.
        results = []
        # Iterate through the buffer, adding transitions to the list.
        for i in range(start, start + self.history_length):
            transition = self._transitions[i % self.max_length]
            results.append(transition)
            # Stop at an episode boundary or at the newest frame.
            if transition.is_done or i == (self._cur_idx - 1):
                break
        # Pad and return the transitions.
        return self._pad(results)

    def get_next_transitions(self, start, end):
        """
        Returns the raw transitions in the half-open index range
        [start, end), without wrap-around or padding.
        :param start: first index (inclusive)
        :type start: int
        :param end: last index (exclusive)
        :type end: int
        :return: the transitions in order
        :rtype: list
        """
        result = []
        for i in range(start, end):
            result.append(self._transitions[i])
        return result

    def is_empty(self):
        """
        Checks whether this replay buffer is empty.
        :return: true if this replay buffer is empty, false otherwise
        :rtype: bool
        """
        return self._size == 0

    def is_full(self):
        """
        Checks whether this replay buffer has reached the max length.
        :return: true if this replay buffer is full, false otherwise
        :rtype: bool
        """
        return self._size == self.max_length

    def _pad(self, transitions):
        """
        Adds padding to the beginning of the given list of transitions.
        :param transitions: the list of transitions to pad
        :type transitions: list
        :return: the padded transitions
        :rtype: list
        """
        return [self.empty_trans for _ in
                range(self.history_length - len(transitions))] + transitions

    def peak_frame(self):
        """
        Returns the last frame if the buffer is non-empty and an empty
        transition frame otherwise.
        :return: the last frame added to the buffer
        :rtype: TransitionFrame
        """
        if (self.is_empty()):
            return self.empty_trans
        return self._transitions[self._cur_idx - 1]

    def sample(self, batch_size: int):
        """
        Gets a number of samples equal to the batch size, each of length
        equal to the history length of this buffer. Sampling is uniform
        without replacement.
        :param batch_size: The number of samples to retrieve
        :type batch_size: int
        :return: the sample indexes and samples in lists
        :rtype: tuple of lists
        """
        if (not isinstance(batch_size, int) or batch_size < 1):
            raise ValueError("The batch size must be a positive integer.")
        result_idxes = []
        result = []
        for i in random.sample(range(len(self)), batch_size):
            result_idxes.append(i)
            result.append(self.get_transitions(i))
        return result_idxes, result

    def sum_priority(self):
        """
        Returns the sum of priorities of all transitions in this
        Prioritized Replay Buffer.
        No-op hook; overridden by PrioritizedReplayBuffer.
        :return: the sum of all priorities
        :rtype: float
        """
        pass

    def update_error(self, idx: int, error: float):
        """
        Updates the priority of the transition frame at the given idx given
        the error of the transition.
        No-op hook; overridden by PrioritizedReplayBuffer.
        :param idx: is the index of the transition to update the priority of
        :type idx: int
        :param error: is error of the transition
        :type error: float
        """
        pass

    def update_priority(self, idx: int, priority: float):
        """
        Updates the priority of the transition frame at the given idx directly.
        No-op hook; overridden by PrioritizedReplayBuffer.
        :param idx: is the index of the transition to update the priority of
        :type idx: int
        :param priority: is priority to give the transition
        :type priority: float
        """
        pass
class PrioritizedReplayBuffer(ReplayBuffer):
    """
    An Experience Replay Buffer for looking back and resampling transitions
    using a prioritized sampling technique based on loss.
    Requires the agent to have a compute_loss function. The agent needs to
    compute the loss of a sample and call either update_error or
    update_priority to update the sample's priority.

    Fix over the original: the `_to_transition_idx` method was defined twice
    with identical bodies (the second definition silently shadowed the
    first); the duplicate has been removed.
    """
    def __init__(self, learner, max_length, empty_trans, history_length: int = 1, alpha: float = 0.6):
        """
        Constructor method
        :param alpha: exponent shaping how strongly priorities skew sampling
            (0 = uniform, 1 = fully proportional)
        :type alpha: float
        """
        super().__init__(learner, max_length, empty_trans, history_length)
        self._alpha = alpha
        # Small constant keeping zero-error transitions sampleable.
        self._epsilon = 0.00001
        self._max_priority = 1.0
        # A SumTree that stores the priorities of the transitions. The leaf
        # nodes (the second half of the array) are the direct priorities of
        # the transitions; each parent node holds the sum of the priorities
        # of its children, so the root holds the total.
        self._priority_tree = np.zeros(2 * self.max_length - 1)

    def append_frame(self, transition_frame):
        """
        Appends a given frame to the buffer with maximal priority so new
        transitions are sampled at least once.
        :param transition_frame: the transition frame to append to the end
        of this buffer
        :type transition_frame: TransitionFrame
        """
        # Add the transition_frame to the transitions array.
        super().append_frame(transition_frame)
        # Set the priority for this transition as the max priority.
        self.update_priority((self._cur_idx - 1) % self.max_length, self._max_priority)

    def sample(self, batch_size: int):
        """
        Gets a number of samples equal to the batch size using stratified
        proportional sampling over the SumTree: the total priority mass is
        split into batch_size segments and one point is drawn per segment.
        :param batch_size: The number of samples to retrieve
        :type batch_size: int
        :return: the sample indexes and samples in lists
        :rtype: tuple of lists
        """
        if (not isinstance(batch_size, int) or batch_size < 1):
            raise ValueError("The batch size must be a positive integer.")
        result_idx = []
        result = []
        segment_length = self.sum_priority() / batch_size
        for i in range(batch_size):
            s = random.uniform(segment_length * i, segment_length * (i + 1))
            sample_idx = self._sample_helper(0, s)
            result_idx.append(sample_idx)
            result.append(self.get_transitions(sample_idx))
        return result_idx, result

    def sum_priority(self):
        """
        Returns the sum of priorities of all transitions in this
        Prioritized Replay Buffer (the SumTree root).
        :return: the sum of all priorities
        :rtype: float
        """
        return self._priority_tree[0]

    def update_error(self, idx: int, error: float):
        """
        Updates the priority of the transition frame at the given idx given
        the error of the transition.
        :param idx: is the index of the transition to update the priority of
        :type idx: int
        :param error: is error of the transition
        :type error: float
        """
        if (not isinstance(error, float)):
            raise ValueError("The error should be a float")
        # Calculate the priority from the error.
        priority = self._calculate_priority(error)
        # Update the transition with the calculated priority.
        self.update_priority(idx, priority)

    def update_priority(self, idx: int, priority: float):
        """
        Updates the priority of the transition frame at the given idx directly.
        :param idx: is the index of the transition to update the priority of
        :type idx: int
        :param priority: is priority to give the transition
        :type priority: float
        """
        if (not isinstance(idx, int) or idx < 0 or idx >= self._size):
            raise ValueError("The index must be a valid index in the range of the buffer.")
        if (not isinstance(priority, float)):
            raise ValueError("The priority must be a float.")
        # Update the priority of the given transition and propagate the
        # change to the parent nodes.
        tree_idx = self._to_tree_idx(idx)
        self._propagate(tree_idx, (priority ** self._alpha) - self._priority_tree[tree_idx])
        # Update the max priority if necessary.
        self._max_priority = max(self._max_priority, priority)

    def _calculate_priority(self, error: float):
        """
        Calculates the priority from the given error.
        :param error: is the error
        :type error: float
        :return: the calculated priority
        :rtype: float
        """
        return (np.abs(error) + self._epsilon) ** self._alpha

    def _propagate(self, idx: int, change: float):
        """
        Recursively propagates the change in the priority of a node up the
        SumTree until the change is propagated to the root.
        :param idx: is the index for a node in the array to propagate the
        change to
        :type idx: int
        :param change: the amount to change the priority by
        :type change: float
        """
        if (not isinstance(idx, int) or idx < 0 or idx >= len(self._priority_tree)):
            raise ValueError("The index must be a valid index in the range of the tree.")
        if (not isinstance(change, float)):
            raise ValueError("The change must be a float.")
        # Add the change in priority to this node.
        self._priority_tree[idx] += change
        # If this is not the root node, propagate the change to the parent.
        if (idx > 0):
            self._propagate((idx - 1) // 2, change)

    def _sample_helper(self, idx: int, s: float):
        """
        Recursive helper function for sampling a transition based on s.
        Should only be called by PrioritizedReplayBuffer.sample.
        :param idx: is the current index being looked at.
        :type idx: int
        :param s: the prefix sum to search for.
        :type s: float
        :return: the index of a sampled transition.
        :rtype: int
        """
        idx_left = 2 * idx + 1
        # Leaf node reached: translate it back to a transition index.
        if idx_left >= len(self._priority_tree):
            return self._to_transition_idx(idx)
        if s <= self._priority_tree[idx_left]:
            return self._sample_helper(idx_left, s)
        else:
            return self._sample_helper(idx_left + 1, s - self._priority_tree[idx_left])

    def _to_transition_idx(self, tree_idx: int):
        """
        Calculates the corresponding transition index of the given tree
        index.
        :param tree_idx: is the index for a node in this SumTree
        :type tree_idx: int
        :return: the index for the transition that corresponds to given
        tree node.
        :rtype: int
        """
        return tree_idx - self.max_length + 1

    def _to_tree_idx(self, idx: int):
        """
        Calculates the corresponding tree index of the given transition
        index.
        :param idx: is the index for a transition stored in this buffer.
        :type idx: int
        :return: the index for the tree node that corresponds to that
        transition.
        :rtype: int
        """
        return idx + self.max_length - 1
class HindsightReplayBuffer(ReplayBuffer):
    """
    A replay buffer with Hindsight Experience Replay (HER) style relabelling:
    episode transitions are additionally stored in a side buffer, and
    apply_hindsight rewrites them against the episode's final state as the
    goal.
    """
    def __init__(self, learner, max_length, empty_trans, history_length: int = 1):
        """
        Constructor method; parameters as for ReplayBuffer. Also creates the
        side buffer used to collect the current episode's transitions.
        """
        super().__init__(learner, max_length, empty_trans, history_length)
        self._hindsight_buffer = deque()

    def append_frame(self, transition_frame):
        """
        Appends a given framed to the buffer.
        :param transition_frame: the transition frame to append to the end
        of this buffer
        :type transition_frame: TransitionFrame
        """
        # Add the transition_frame to the transitions array.
        super().append_frame(transition_frame)
        # Also keep it in the episode side buffer for later relabelling.
        self._hindsight_buffer.append(transition_frame)

    def apply_hindsight(self):
        """
        Drains the episode side buffer, relabelling each transition with the
        episode's final next_state as the goal: reward becomes 0 and is_done
        True exactly when the transition's state equals the goal.

        NOTE(review): the relabelled frames store `goal` as BOTH the state and
        next_state instead of the transition's own state — looks suspicious
        for HER; confirm against TransitionFrame's field order.
        NOTE(review): if ActionTransitionFrame subclasses TransitionFrame, the
        first isinstance branch always matches and the elif is unreachable —
        verify the class hierarchy in TransitionFrame.py.
        """
        goal = np.asarray(self._hindsight_buffer[-1].next_state)
        while self._hindsight_buffer:
            current = self._hindsight_buffer.popleft()
            state = np.asarray(current.state)
            reward = current.reward
            is_done = False
            # Element-wise exact match with the goal marks a terminal frame.
            if (np.sum(np.abs((state - goal))) == 0):
                reward = 0
                is_done = True
            if (isinstance(current, TransitionFrame)):
                super().append_frame(TransitionFrame(goal, -1, reward, goal, is_done))
            elif (isinstance(current, ActionTransitionFrame)):
                super().append_frame(ActionTransitionFrame(-1, goal, -1, reward, goal, is_done))
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,589 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/Util/paramFrame.py | import tkinter
from abc import ABC, abstractmethod
class ParamFrame(tkinter.Frame, ABC):
    """Abstract tkinter frame that exposes user-entered parameter values.

    Concrete subclasses build the widgets and implement getParameters.
    """

    @abstractmethod
    def getParameters(self):
        """Return the parameter values currently entered in this frame."""
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,590 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/Collections/qTable.py | from abc import ABC
import numpy as np
import joblib
from Agents import modelFreeAgent
class QTable(modelFreeAgent.ModelFreeAgent, ABC):
    """Tabular Q-value base agent: a sparse (state, action) -> value mapping
    with joblib-based persistence."""
    displayName = 'Q Table'
    newParameters = [modelFreeAgent.ModelFreeAgent.Parameter('Alpha', 0.00, 1.00, 0.01, 0.18, True, True, "The learning rate factor which determines how quickly we use new data")]
    parameters = modelFreeAgent.ModelFreeAgent.parameters + newParameters

    def __init__(self, *args):
        # The trailing args belong to this class; the rest go to the parent.
        extra = len(QTable.newParameters)
        super().__init__(*args[:-extra])
        (self.alpha,) = args[-extra:]
        self.qtable = {}

    def getQvalue(self, state, action):
        """Return the stored Q-value, defaulting to 0.0 for unseen pairs."""
        return self.qtable.get((state, action), 0.0)

    def choose_action(self, state):
        """Greedily pick the first action achieving the maximal Q-value."""
        values = [self.getQvalue(state, action) for action in range(self.action_size)]
        best = max(values)
        # Decayed exploration rate — computed but currently unused; the
        # epsilon-greedy branch below is left disabled (greedy policy only).
        epsilon = self.min_epsilon + (self.max_epsilon - self.min_epsilon) * np.exp(-self.decay_rate * self.time_steps)
        # TODO: Put epsilon at a level near this
        # if random.random() > epsilon:
        #     explore with a random action instead of the greedy one
        return values.index(best)

    def __compute_new_q_value(self):
        # Placeholder hook for subclass-specific Bellman updates.
        pass

    def save(self, filename):
        """Persist the table (tagged with the agent name) to disk."""
        joblib.dump((self.displayName, self.qtable), filename)

    def load(self, filename):
        """Restore a table from disk, rejecting files saved by other agents."""
        name, table = joblib.load(filename)
        if name == self.displayName:
            self.qtable = table
            print('load successful')
        else:
            print('load failed')

    def memsave(self):
        """Return the live table for in-memory transfer."""
        return self.qtable

    def memload(self, mem):
        """Replace the live table with an in-memory one."""
        self.qtable = mem

    def reset(self):
        """Forget all learned Q-values."""
        self.qtable.clear()
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,591 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/sac.py | import joblib
import tensorflow as tf
from typing import Sequence
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Input, Flatten, multiply
import numpy as np
from Agents import modelFreeAgent
from Agents.Collections import ExperienceReplay
from Agents.Collections.TransitionFrame import TransitionFrame
tf.keras.backend.set_floatx('float64')
# Class to create Actor Network
# Class to create Actor Network
class actorNetwork(Model):
    """
    Squashed-Gaussian SAC actor: two 24-unit ReLU layers feeding separate
    mean and log-std heads; actions are tanh-squashed samples with the
    matching log-probability correction.
    Source: https://github.com/RickyMexx/SAC-tf2/blob/master/common/utils.py.
    """
    def __init__(self, action_dim):
        super(actorNetwork, self).__init__()
        # Numerical floor inside the tanh log-prob correction.
        self.logprob_epsilon = 1e-6
        # Small uniform init range for the output heads.
        actor_bound = 3e-3
        self.network = Sequential()
        # Two hidden layers of 24 ReLU units.
        for i in range(2):
            self.network.add(Dense(24, activation="relu"))
        self.mean = Dense(action_dim,
                          kernel_initializer=tf.random_uniform_initializer(-actor_bound, actor_bound),
                          bias_initializer=tf.random_uniform_initializer(-actor_bound, actor_bound))
        self.prob = Dense(action_dim,
                          kernel_initializer=tf.random_uniform_initializer(-actor_bound, actor_bound),
                          bias_initializer=tf.random_uniform_initializer(-actor_bound, actor_bound))

    @tf.function
    def call(self, inp):
        """Return (tanh-squashed sampled action, summed log-probability)."""
        x = self.network(inp)
        mean = self.mean(x)
        log_std = self.prob(x)
        # Clip log-std to a sane range before exponentiating.
        prob_clipped = tf.clip_by_value(log_std, -20, 2)
        normal_dist = tf.compat.v1.distributions.Normal(mean, tf.exp(prob_clipped))
        # NOTE(review): stop_gradient on the sample blocks reparameterised
        # gradients through the action — confirm this is intended for SAC.
        action = tf.stop_gradient(normal_dist.sample())
        action_returned = tf.tanh(action)
        # Change-of-variables correction for the tanh squashing.
        prob = normal_dist.log_prob(action) - tf.math.log(1.0 - tf.pow(action_returned, 2) + self.logprob_epsilon)
        prob = tf.reduce_sum(prob, axis=-1, keepdims=True)
        return action_returned, prob

    def _get_params(self):
        # NOTE(review): self.graph / self.sess are TF1-style attributes that
        # are never set on this TF2 Model — this likely raises
        # AttributeError if called; verify before use.
        with self.graph.as_default():
            parameters = tf.trainable_variables()
            name = [s.name for s in parameters]
            value_return = self.sess.run(parameters)
            params = {k: v for k, v in zip(name, value_return)}
            return params

    def __getstate__(self):
        # Pickle support; relies on self.args_copy, which is never assigned
        # in this class — presumably set externally; confirm.
        params = self._get_params()
        state = self.args_copy, params
        return state

    def __setstate__(self, state):
        # Unpickle support; relies on restore_params, not defined here.
        args, params = state
        self.__init__(**args)
        self.restore_params(params)
def soft_update(source: "Sequence[tf.Variable]", target: "Sequence[tf.Variable]", tau: float):
    """Polyak-average each target variable toward its source variable.

    Performs, element-wise over the paired lists:
        target <- (1 - tau) * target + tau * source
    ``tau == 1.0`` reduces to a hard copy (see :func:`force_update`).

    Annotations are strings so the signature is never evaluated eagerly.

    :param source: variables providing the new values
    :param target: variables updated in place
    :param tau: interpolation factor in [0, 1]
    :raises ValueError: if the two lists differ in length
    :returns: the last target variable updated (the untouched ``target`` list
        when it is empty) - kept for backward compatibility; callers in this
        file ignore the return value.
    """
    if len(source) != len(target):
        raise ValueError("source_vars and target_vars must have the same length.")
    # The original rebound `source`/`target` inside the loop, shadowing the
    # parameters and making the trailing return confusing; use fresh names.
    last = target
    for src_var, tgt_var in zip(source, target):
        tgt_var.assign((1.0 - tau) * tgt_var + tau * src_var)
        last = tgt_var
    return last
def force_update(source: Sequence[tf.Variable], target: Sequence[tf.Variable]):
    # Hard copy: a soft update with tau=1.0 overwrites each target variable
    # with its corresponding source variable. Used to initialize the target
    # Q-networks from the freshly-built online networks.
    soft_update(source, target, 1.0)
class SAC(modelFreeAgent.ModelFreeAgent):
    """Soft Actor-Critic agent: a squashed-Gaussian actor plus twin soft
    Q-networks, each with a Polyak-averaged target network."""

    displayName = 'SAC'
    newParameters = [modelFreeAgent.ModelFreeAgent.Parameter('Batch Size', 1, 256, 1, 32, True, True,
                                                             "The number of transitions to consider simultaneously when updating the agent"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Memory Size', 1, 655360, 1, 1000, True, True,
                                                             "The maximum number of timestep transitions to keep stored"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Target Update Interval', 1, 100000, 1, 200, True, True,
                                                             "The distance in timesteps between target model updates"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Tau', 0.00, 1.00, 0.001, 0.97, True, True,
                                                             "The rate at which target models update"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Temperature', 0.00, 1.00, 0.001, 0.97, True, True,
                                                             "The rate at which target models update")
                     ]
    parameters = modelFreeAgent.ModelFreeAgent.parameters + newParameters

    def __init__(self, *args):
        """Split this agent's hyperparameters off the tail of *args, then build
        the actor, the twin soft Q-networks and their targets, and the replay
        buffer."""
        paramLen = len(SAC.newParameters)
        super().__init__(*args[:-paramLen])
        batch_size, memory_size, target_update, tau, temperature = args[-paramLen:]
        # BUG FIX: the original cast every hyperparameter with int(), which
        # truncated Tau and Temperature (e.g. the 0.97 defaults became 0),
        # zeroing the entropy term. Only the counts are integral.
        self.batch_size = int(batch_size)
        self.memory_size = int(memory_size)
        self.target_update_interval = int(target_update)
        self.tau = float(tau)
        self.temperature = float(temperature)
        # Interpolation factor actually used for target soft updates
        # (NOTE(review): the user-facing Tau parameter is never consulted).
        self.polyak = 0.01
        self.total_steps = 0
        empty_state = self.get_empty_state()
        self.memory = ExperienceReplay.ReplayBuffer(self, self.memory_size,
                                                    TransitionFrame(empty_state, -1, 0, empty_state, False))
        # Learning rates for the actor-critic models.
        critic_lr = 0.002
        actor_lr = 0.001
        self.critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
        self.actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
        self.actor_network = actorNetwork(self.action_size)
        self.soft_Q_network = self.q_network()
        self.soft_Q_targetnetwork = self.q_network()
        self.soft_Q_network1 = self.q_network()
        self.soft_Q_targetnetwork1 = self.q_network()
        # Build both soft Q-functions (and their targets) by calling them once,
        # then hard-copy the online weights into the targets.
        in1 = tf.keras.Input(shape=self.state_size, dtype=tf.float64)
        in2 = tf.keras.Input(shape=self.action_size, dtype=tf.float64)
        self.soft_Q_network([in1, in2])
        self.soft_Q_targetnetwork([in1, in2])
        force_update(self.soft_Q_network.variables, self.soft_Q_targetnetwork.variables)
        self.soft_Q_network1([in1, in2])
        self.soft_Q_targetnetwork1([in1, in2])
        force_update(self.soft_Q_network1.variables, self.soft_Q_targetnetwork1.variables)
        # Separate optimizers for the two critics and the actor.
        self.softq_optimizer = tf.keras.optimizers.Adam(learning_rate=critic_lr)
        self.softq_optimizer2 = tf.keras.optimizers.Adam(learning_rate=critic_lr)
        self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=actor_lr)

    def q_network(self):
        """Build one soft Q-network: per-action Q values masked by a one-hot
        action input, so the output is Q(s, a) for the selected action."""
        input_shape = self.state_size
        inputA = Input(shape=input_shape)
        inputB = Input(shape=(self.action_size,))
        x = Flatten()(inputA)
        x = Dense(24, input_dim=self.state_size, activation='relu')(x)  # fully connected
        x = Dense(24, activation='relu')(x)
        x = Dense(self.action_size, activation='linear')(x)
        # Mask the per-action Q vector with the one-hot action input.
        outputs = multiply([x, inputB])
        model = Model(inputs=[inputA, inputB], outputs=outputs)
        model.compile(loss='mse', optimizer=self.critic_optimizer)
        return model

    def value_network(self):
        """Build a state-value network (no action mask). Not used by the
        training loop below but kept as part of the public surface."""
        input_shape = self.state_size
        inputA = Input(shape=input_shape)
        x = Flatten()(inputA)
        x = Dense(24, input_dim=self.state_size, activation='relu')(x)  # fully connected
        x = Dense(24, activation='relu')(x)
        x = Dense(self.action_size, activation='linear')(x)
        model = Model(inputs=[inputA], outputs=x)
        model.compile(loss='mse', optimizer=self.critic_optimizer)
        return model

    def soft_q_value(self, states: np.ndarray, actions: np.ndarray):
        """Q values from the first critic.

        NOTE(review): passes `actions` as a second positional argument instead
        of the `[states, actions]` list used everywhere else; verify before
        relying on this helper.
        """
        return self.soft_Q_network(states, actions)

    def soft_q_value1(self, states: np.ndarray, actions: np.ndarray):
        """Q values from the second critic (same caveat as soft_q_value)."""
        return self.soft_Q_network1(states, actions)

    def action(self, states):
        """Get the sampled action for a single (batched) state."""
        return self.actor_network(states)[0][0]

    def actions(self, states):
        """Get sampled actions for a batch of states."""
        return self.actor_network(states)[0]

    def choose_action(self, state):
        """Sample an action for a single state; the tanh-squashed continuous
        sample is discretized by integer truncation."""
        shape = (-1,) + self.state_size
        state = np.reshape(state, shape)
        u = self.action(state)
        action_returned = u.numpy()[0]
        action_returned = action_returned.astype(int)
        return action_returned

    def sample(self):
        """Draw a random minibatch from the replay buffer."""
        return self.memory.sample(self.batch_size)

    def addToMemory(self, state, action, reward, new_state, done):
        """Append one transition to the replay buffer."""
        self.memory.append_frame(TransitionFrame(state, action, reward, new_state, done))

    def remember(self, state, action, reward, new_state, done=False):
        """Store a transition, then (once 2*batch_size transitions exist) run
        one SAC update: twin soft-Q regression plus a policy-gradient step.

        :returns: the first critic's scalar loss (0 until training starts).
        """
        self.addToMemory(state, action, reward, new_state, done)
        loss = 0
        if len(self.memory) < 2 * self.batch_size:
            return loss
        _, mini_batch = self.sample()
        states, actions, next_states, rewards, dones = self.learn(mini_batch)
        states = states.astype(float)
        next_states = next_states.astype(float)
        # Log-probability of the policy's action in the sampled states.
        action, action_prob = self.actor_network(states)
        # NOTE(review): the targets pair next_states with the *replayed*
        # one-hot actions rather than freshly sampled next actions - confirm
        # this is intended.
        val_target = self.soft_Q_network([next_states, actions])
        val_target1 = self.soft_Q_network1([next_states, actions])
        # Clipped double-Q minimum with the entropy (temperature) bonus.
        nextval_sample = tf.math.minimum(val_target, val_target1) - self.temperature * action_prob
        # Soft Bellman targets.
        Q_targets = rewards + self.gamma * (1 - dones) * nextval_sample
        # Gradient descent for critic 1.
        with tf.GradientTape() as qtape:
            Q = self.soft_Q_network([states, actions])
            Q_loss = tf.reduce_mean(tf.square(Q - Q_targets))
        softq_gradients = qtape.gradient(Q_loss, self.soft_Q_network.trainable_weights)
        # Gradient descent for critic 2.
        with tf.GradientTape() as qtape2:
            Q2 = self.soft_Q_network1([states, actions])
            Q2_loss = tf.reduce_mean(tf.square(Q2 - Q_targets))
        softq_gradients2 = qtape2.gradient(Q2_loss, self.soft_Q_network1.trainable_weights)
        # Policy update: maximize min-Q minus the log-prob (entropy) penalty.
        with tf.GradientTape() as tape:
            actions, action_logprob = self.actor_network(states)
            soft_Q = tf.math.minimum(self.soft_Q_network([states, actions]), self.soft_Q_network1([states, actions]))
            loss_policy = tf.reduce_mean(action_logprob - soft_Q)
        actor_gradients = tape.gradient(loss_policy, self.actor_network.trainable_weights)
        # Apply all three gradient updates.
        self.actor_optimizer.apply_gradients(zip(actor_gradients, self.actor_network.trainable_weights))
        self.softq_optimizer.apply_gradients(zip(softq_gradients, self.soft_Q_network.trainable_weights))
        self.softq_optimizer2.apply_gradients(zip(softq_gradients2, self.soft_Q_network1.trainable_weights))
        Q_loss = Q_loss.numpy()
        self.updateTarget()
        return Q_loss

    def updateTarget(self):
        """Every `target_update_interval` steps (after warm-up), Polyak-average
        the online critic weights into the target networks, then advance the
        step counter."""
        if self.total_steps >= 2 * self.batch_size and self.total_steps % self.target_update_interval == 0:
            soft_update(self.soft_Q_network.variables, self.soft_Q_targetnetwork.variables, self.polyak)
            soft_update(self.soft_Q_network1.variables, self.soft_Q_targetnetwork1.variables, self.polyak)
            print("targets updated")
        self.total_steps += 1

    def create_one_hot(self, vector_length, hot_index):
        """Return a one-hot vector of `vector_length`; an index of -1 yields
        the all-zero vector (used for the empty-transition sentinel)."""
        output = np.zeros(vector_length)
        if hot_index != -1:
            output[hot_index] = 1
        return output

    def learn(self, mini_batch):
        """Unpack a minibatch of TransitionFrames into batched numpy arrays.

        Rewards and done flags are broadcast across the action dimension so
        they align element-wise with the (batch, action_size) critic outputs.
        """
        states = (np.zeros((self.batch_size,) + self.state_size))
        actions = np.zeros((self.batch_size,) + (self.action_size,))
        next_states = (np.zeros((self.batch_size,) + self.state_size))
        rewards = np.zeros((self.batch_size,) + (self.action_size,))
        dones = np.zeros((self.batch_size,) + (self.action_size,))
        for index_rep, transition in enumerate(mini_batch):
            states[index_rep] = transition.state
            actions[index_rep] = self.create_one_hot(self.action_size, transition.action)
            next_states[index_rep] = transition.next_state
            rewards[index_rep] = transition.reward
            dones[index_rep] = transition.is_done
        return states, actions, next_states, rewards, dones

    def predict(self, state, isTarget):
        """Return the actor's action for `state`; when `isTarget` is truthy the
        method only prints and implicitly returns None (legacy behavior,
        preserved)."""
        shape = (-1,) + self.state_size
        state = np.reshape(state, shape)
        if isTarget:
            print("Target achieved")
        else:
            result1 = self.action(state)
            return result1

    def save(self, filename):
        """Serialize the actor and both target-critic weight sets to disk."""
        # Call the actor once on an empty state so its variables are built
        # before get_weights().
        self.actor_network(np.reshape(self.get_empty_state(), (-1,) + self.state_size))
        act_mem = self.actor_network.get_weights()
        s0_mem = self.soft_Q_targetnetwork.get_weights()
        s1_mem = self.soft_Q_targetnetwork1.get_weights()
        joblib.dump((SAC.displayName, act_mem, s0_mem, s1_mem), filename)
        print('Model saved')

    def load(self, filename):
        """Restore weights saved by `save`; refuses files written by another
        agent type."""
        name, act_wt, s0_wt, s1_wt = joblib.load(filename)
        if name != SAC.displayName:
            print('load failed')
        else:
            self.actor_network(np.reshape(self.get_empty_state(), (-1,) + self.state_size))
            self.actor_network.set_weights(act_wt)
            self.soft_Q_targetnetwork.set_weights(s0_wt)
            self.soft_Q_targetnetwork1.set_weights(s1_wt)

    def memsave(self):
        """Return (actor, target-critic-1, target-critic-2) weight tuples for
        in-memory checkpointing."""
        self.actor_network(np.reshape(self.get_empty_state(), (-1,) + self.state_size))
        actor_weights = self.actor_network.get_weights()
        soft0_weights = self.soft_Q_targetnetwork.get_weights()
        soft1_weights = self.soft_Q_targetnetwork1.get_weights()
        return (actor_weights, soft0_weights, soft1_weights)

    def memload(self, mem):
        """Restore a weight tuple produced by `memsave`."""
        act_wt, s0_wt, s1_wt = mem
        self.actor_network(np.reshape(self.get_empty_state(), (-1,) + self.state_size))
        self.actor_network.set_weights(act_wt)
        self.soft_Q_targetnetwork.set_weights(s0_wt)
        self.soft_Q_targetnetwork1.set_weights(s1_wt)

    def reset(self):
        """No per-episode state to reset for this agent."""
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,592 | RobertCordingly/easyRL-v0 | refs/heads/master | /MVC/terminalView.py | from Agents import qLearning, drqn, deepQ, adrqn, agent, doubleDuelingQNative, drqnNative, drqnConvNative, ppoNative, reinforceNative, actorCriticNative, cem, npg, ddpg, sac, trpo, rainbow
from Environments import cartPoleEnv, cartPoleEnvDiscrete, atariEnv, frozenLakeEnv, pendulumEnv, acrobotEnv, mountainCarEnv
from MVC.model import Model
from Agents.sarsa import sarsa
import time, os
class View:
    """Terminal (text-menu) front end: prompts the user to pick an environment,
    an agent, and hyperparameters, then drives training/testing through the
    `listener` (the controller) and reports per-episode results."""

    agents = [qLearning.QLearning, sarsa, deepQ.DeepQ, deepQ.DeepQPrioritized, deepQ.DeepQHindsight, drqn.DRQN, drqn.DRQNPrioritized, drqn.DRQNHindsight, adrqn.ADRQN, adrqn.ADRQNPrioritized, adrqn.ADRQNHindsight, npg.NPG, ddpg.DDPG, cem.CEM, sac.SAC, trpo.TRPO, rainbow.Rainbow]
    environments = [cartPoleEnv.CartPoleEnv, cartPoleEnvDiscrete.CartPoleEnvDiscrete, frozenLakeEnv.FrozenLakeEnv,
                    pendulumEnv.PendulumEnv, acrobotEnv.AcrobotEnv, mountainCarEnv.MountainCarEnv]
    environments += atariEnv.AtariEnv.subEnvs

    def __init__(self, listener):
        """Run the interactive session: pick environment and agent, then loop
        on the main menu until the user exits."""
        self.listener = listener
        self.isHalted = False
        self.isTrained = False
        self.episodeNum = None
        self.dataPoints = []       # (epsilon, total reward, avg loss) per episode
        self.episodeStates = []    # per-step state messages for the current episode
        self.paramValues = None
        # NOTE(review): chooseEnvironment() has no return statement, so this is
        # always None; the actual selection is registered with the listener.
        self.environment = self.chooseEnvironment()
        self.agentType = self.chooseAgent()
        while not self.isHalted:
            self.mainMenu()

    def chooseEnvironment(self):
        """Prompt until a valid environment index is entered and register the
        selection with the listener."""
        valid = False
        while not valid:
            # BUG FIX: the menu counter must restart on every attempt; it was
            # initialized outside the loop, so an invalid entry made the item
            # numbers keep growing on each redisplay.
            count = 1
            text = '\nChoose an environment:\n'
            for env in View.environments:
                text += str(count) + ') ' + env.displayName + '\n'
                count += 1
            choice = input(text)
            try:
                choice = int(choice)
                if 1 <= choice <= len(View.environments):
                    self.listener.setEnvironment(0, View.environments[choice - 1])
                    valid = True
            except ValueError:
                print('Input must be an integer')

    def chooseAgent(self):
        """Prompt until a valid agent index is entered; registers it with the
        listener and returns the chosen agent class."""
        valid = False
        selectedAgent = None
        while not valid:
            # BUG FIX: reset the menu counter each attempt (same defect as in
            # chooseEnvironment).
            count = 1
            text = '\nChoose an agent:\n'
            for agentClass in View.agents:
                text += str(count) + ') ' + agentClass.displayName + '\n'
                count += 1
            choice = input(text)
            try:
                choice = int(choice)
                if 1 <= choice <= len(View.agents):
                    selectedAgent = View.agents[choice - 1]
                    self.listener.setAgent(0, selectedAgent)
                    valid = True
            except ValueError:
                print('Input must be an integer')
        return selectedAgent

    def chooseParameters(self):
        """Prompt for each hyperparameter (episode count, max steps, plus the
        selected agent's own parameters) and return the values in order."""
        params = []
        paramValues = []
        params.append(agent.Agent.Parameter('Total Episodes', 1, float('inf'), 1, 1000, True, True, 'The number of episodes to train the agent'))
        params.append(agent.Agent.Parameter('Max Steps', 1, float('inf'), 1, 200, True, True, 'The maximum number of steps per episodes'))
        params += self.agentType.parameters
        count = 1
        print('Choose agent hyperparameters:')
        for param in params:
            text = '\n' + str(count) + ') ' + param.name + ' (' + param.toolTipText + ')\n'
            text += 'must be between ' + str(param.min) + ' and ' + str(param.max) + '\n'
            count += 1
            valid = False
            while not valid:
                choice = input(text)
                try:
                    # Integer-resolution parameters are parsed as int, the
                    # rest as float.
                    choice = int(choice) if param.resolution % 1 == 0 else float(choice)
                    if param.min <= choice <= param.max:
                        paramValues.append(choice)
                        valid = True
                    else:
                        print('Input not within range')
                except ValueError:
                    print('Input must be a number')
        return paramValues

    def mainMenu(self):
        """Show the main menu and dispatch the chosen action. Testing and
        saving only appear once an agent has been trained or loaded; Exit is
        always the last entry (choice == count - 1)."""
        count = 1
        text = '\n' + str(count) + ') Start training\n'
        count += 1
        text += str(count) + ') Load saved agent\n'
        count += 1
        if self.isTrained:
            text += str(count) + ') Start testing\n'
            count += 1
            text += str(count) + ') Save trained agent\n'
            count += 1
        text += str(count) + ') Exit\n'
        count += 1
        valid = False
        choice = None
        while not valid:
            choice = input(text)
            try:
                choice = int(choice)
                if 1 <= choice <= count - 1:
                    valid = True
                else:
                    print('Input not within range')
            except ValueError:
                print('Input must be an integer')
        if choice == count - 1:
            self.isHalted = True
        elif choice == 1:
            # Train: collect hyperparameters and block until training ends.
            self.paramValues = self.chooseParameters()
            self.listener.startTraining(0, self.paramValues)
            self.isTrained = True
            self.episodeNum = 0
            self.dataPoints.clear()
            self.episodeStates.clear()
            while self.checkMessages():
                time.sleep(0.1)
        elif choice == 2:
            # Load a previously saved agent from the working directory.
            workingDir = os.getcwd()
            loaded = False
            while not loaded:
                text = 'Type the filename of the agent to load:\n'
                filename = input(text)
                fullpath = workingDir + '/' + filename
                try:
                    self.listener.load(fullpath, 0)
                    loaded = True
                    self.isTrained = True
                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt and
                    # SystemExit still propagate.
                    print('Invalid filename')
        elif choice == 3:
            # Test the current agent.
            self.paramValues = self.chooseParameters()
            self.listener.startTesting(0, self.paramValues)
            self.episodeNum = 0
            self.dataPoints.clear()
            self.episodeStates.clear()
            while self.checkMessages():
                time.sleep(0.1)
        elif choice == 4:
            # Save the current agent.
            text = 'Type the filename to save the agent as:\n'
            workingDir = os.getcwd()
            filename = input(text)
            fullpath = workingDir + '/' + filename
            self.listener.save(fullpath, 0)

    def checkMessages(self):
        """Drain the model's message queue; return False once a train/test
        finished event arrives, True while more messages may follow."""
        while self.listener.getQueue(0).qsize():
            message = self.listener.getQueue(0).get(timeout=0)
            if message.type == Model.Message.EVENT:
                if message.data == Model.Message.EPISODE:
                    self.episodeNum += 1
                    # NOTE(review): assumes at least one STATE message preceded
                    # this EPISODE event (episodeStates must be non-empty).
                    lastEpsilon = self.episodeStates[-1].epsilon
                    totalReward = sum([state.reward for state in self.episodeStates])
                    avgLoss = None if not self.episodeStates[0].loss else sum([state.loss for state in self.episodeStates]) / len(self.episodeStates)
                    # BUG FIX: corrected the 'episilon' typo in the progress line.
                    print('Episode ' + str(self.episodeNum) + ': epsilon = ' + str(lastEpsilon) + ', reward = ' + str(totalReward) + ', loss = ' + str(avgLoss))
                    self.dataPoints.append((lastEpsilon, totalReward, avgLoss))
                    self.episodeStates.clear()
                elif message.data == Model.Message.TRAIN_FINISHED:
                    totalReward = sum([reward for _, reward, _ in self.dataPoints])
                    # Guard: avoid ZeroDivisionError when halted before any episode.
                    avgReward = totalReward / len(self.dataPoints) if self.dataPoints else 0
                    print('Total Training Reward: ' + str(totalReward))
                    print('Reward/Episode: ' + str(avgReward))
                    return False
                elif message.data == Model.Message.TEST_FINISHED:
                    totalReward = sum([reward for _, reward, _ in self.dataPoints])
                    avgReward = totalReward / len(self.dataPoints) if self.dataPoints else 0
                    print('Total Test Reward: ' + str(totalReward))
                    print('Reward/Episode: ' + str(avgReward))
                    return False
            elif message.type == Model.Message.STATE:
                self.episodeStates.append(message.data)
        return True
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,593 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/ppo.py | from Agents import agent, modelFreeAgent
from Agents.deepQ import DeepQ
from Agents.models import Actor, Critic
from Agents.Collections import ExperienceReplay
from Agents.Collections.TransitionFrame import TransitionFrame
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Conv2D
from tensorflow.keras.layers import Flatten, TimeDistributed, LSTM, multiply
from tensorflow.keras import utils
from tensorflow.keras.losses import KLDivergence
from tensorflow.keras.optimizers import Adam
#from tensorflow_probability.distributions import Multinomial
class PPO(DeepQ):
    """Proximal Policy Optimization agent skeleton built on the DeepQ base.

    NOTE(review): several methods (`remember`, `predict`, `update`) are empty
    stubs, and the PPO-specific hyperparameters received via *args are sliced
    off and never unpacked - this agent appears unfinished.
    """
    displayName = 'PPO'
    newParameters = [DeepQ.Parameter('Policy learning rate', 0.00001, 1, 0.00001, 0.001, True, True,
                                     "A learning rate that the Adam optimizer starts at"),
                     DeepQ.Parameter('Value learning rate', 0.00001, 1, 0.00001, 0.001,
                                     True, True,
                                     "A learning rate that the Adam optimizer starts at"),
                     DeepQ.Parameter('Horizon', 10, 10000, 1, 50,
                                     True, True,
                                     "The number of timesteps over which the returns are calculated"),
                     DeepQ.Parameter('Epoch Size', 10, 100000, 1, 500,
                                     True, True,
                                     "The length of each epoch (likely should be the same as the max episode length)"),
                     DeepQ.Parameter('PPO Epsilon', 0.00001, 0.5, 0.00001, 0.2,
                                     True, True,
                                     "A measure of how much a policy can change w.r.t. the states it's trained on"),
                     DeepQ.Parameter('PPO Lambda', 0.5, 1, 0.001, 0.95,
                                     True, True,
                                     "A parameter that when set below 1, can decrease variance while maintaining reasonable bias")]
    parameters = DeepQ.parameters + newParameters

    def __init__(self, *args):
        # Strip this agent's extra parameters before delegating to DeepQ.
        paramLen = len(PPO.newParameters)
        super().__init__(*args[:-paramLen])
        empty_state = self.get_empty_state()
        # Replay buffer seeded with an empty sentinel transition.
        self.memory = ExperienceReplay.ReplayBuffer(self, self.memory_size, TransitionFrame(empty_state, -1, 0, empty_state, False))
        self.total_steps = 0
        self.actorIts = 2  # number of transitions drawn per sample() call
        # All-ones action masks for single-state and batched network calls.
        self.allMask = np.full((1, self.action_size), 1)
        self.allBatchMask = np.full((self.batch_size, self.action_size), 1)
        # NOTE(review): learning rates are hard-coded here; the user-supplied
        # 'Policy learning rate' / 'Value learning rate' parameters are unused.
        self.policy_lr = 0.001
        self.value_lr = 0.001
        self.policy_model = Actor(self.state_size, self.action_size, self.policy_lr).policy_network()
        self.value_model = Critic(self.state_size, self.action_size, self.value_lr).value_network()

    def sample(self):
        # Draw `actorIts` transitions from the replay buffer.
        return self.memory.sample(self.actorIts)

    def policy_network(self):
        # Accessor for the actor model.
        return self.policy_model

    def value_network(self):
        # Accessor for the critic model.
        return self.value_model

    def addToMemory(self, state, action, reward, new_state, done):
        # Append one transition to the replay buffer.
        self.memory.append_frame(TransitionFrame(state, action, reward, new_state, done))

    def choose_action(self, state):
        # NOTE(review): greedily picks the argmax of the *value* network's
        # output rather than sampling from the policy network - confirm this
        # is intended for PPO.
        shape = (1,) + self.state_size
        state = np.reshape(state, shape)
        val = self.value_model.predict([state, self.allMask])
        action = np.argmax(val)
        print("action: " + str(action))
        return action

    def get_probabilities(self, states):
        # Policy-network action probabilities for a batch of states.
        probabilities = self.policy_model.predict([states, self.allBatchMask])
        return probabilities

    def remember(self, state, action, reward, new_state, done):
        # Stub: no learning step implemented.
        pass

    def predict(self, state, isTarget):
        # Stub: prediction not implemented.
        pass

    def update(self):
        # Stub: update step not implemented.
        pass

    def create_one_hot(self, vector_length, hot_index):
        # One-hot vector of the given length; -1 yields the all-zero vector
        # (sentinel used for the empty transition).
        output = np.zeros((vector_length))
        if hot_index != -1:
            output[hot_index] = 1
        return output

    def reset(self):
        # No per-episode state to reset.
        pass

    def __deepcopy__(self, memodict={}):
        # Deep copies are not supported; returns None implicitly.
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,594 | RobertCordingly/easyRL-v0 | refs/heads/master | /MVC/view.py | import tkinter
from tkinter import ttk
from tkinter import filedialog, W
from tkinter import messagebox
from tkinter.ttk import Style
from ttkthemes import ThemedTk
from PIL import Image
from PIL import ImageTk
from PIL.ImageTk import PhotoImage
import ttkwidgets
from Agents import qLearning, drqn, deepQ, adrqn, agent, doubleDuelingQNative, drqnNative, drqnConvNative, ppoNative, reinforceNative, actorCriticNative, cem, npg, ddpg, sac, ppo, trpo, rainbow
from Agents.Collections import qTable
from Environments import cartPoleEnv, cartPoleEnvDiscrete, atariEnv, frozenLakeEnv, pendulumEnv, acrobotEnv, \
mountainCarEnv
from MVC import helptext
from MVC.model import Model
from Agents.sarsa import sarsa
import importlib.util
# Free-form text displayed by the Help -> About dialog (View.about below).
about = """
software requirements:
Our code can be run on Mac Linux or Windows PC Operating systems with Visual Studio C++ build tools
Requires python 3.7 and # pytorch 1.6
# Further requires Tensorflow 2.1, Keras, Kivy and other packages, see Readme.txt for an explanation
and requirements.txt for details.
EasyRL was created by the following students at the university of washington tacoma:
Neil Hulbert, Sam Spillers, Brandon Francis, James Haines-Temons, Ken Gil Romero
Sam Wong, Kevin Flora, Bowei Huang
"""
class View:
    # All agent classes the GUI can offer.
    agents = [deepQ.DeepQ, deepQ.DeepQPrioritized, deepQ.DeepQHindsight, qLearning.QLearning, drqn.DRQN, drqn.DRQNPrioritized, drqn.DRQNHindsight, adrqn.ADRQN, adrqn.ADRQNPrioritized, adrqn.ADRQNHindsight, doubleDuelingQNative.DoubleDuelingQNative, drqnNative.DRQNNative, drqnConvNative.DRQNConvNative, ppoNative.PPONative, reinforceNative.ReinforceNative, actorCriticNative.ActorCriticNative, sarsa, cem.CEM, npg.NPG, ddpg.DDPG, sac.SAC, trpo.TRPO, rainbow.Rainbow]
    # Environments with a flat (single-dimensional) observation space.
    singleDimEnvs = [cartPoleEnv.CartPoleEnv, cartPoleEnvDiscrete.CartPoleEnvDiscrete, frozenLakeEnv.FrozenLakeEnv,
                     pendulumEnv.PendulumEnv, acrobotEnv.AcrobotEnv, mountainCarEnv.MountainCarEnv]
    environments = singleDimEnvs + atariEnv.AtariEnv.subEnvs
    # Which environments each agent class may be paired with.
    allowedEnvs = {
        deepQ.DeepQ: singleDimEnvs,
        deepQ.DeepQPrioritized: singleDimEnvs,
        deepQ.DeepQHindsight: singleDimEnvs,
        qLearning.QLearning: [cartPoleEnvDiscrete.CartPoleEnvDiscrete, frozenLakeEnv.FrozenLakeEnv],
        drqn.DRQN: environments,
        drqn.DRQNPrioritized: environments,
        drqn.DRQNHindsight: environments,
        adrqn.ADRQN: environments,
        adrqn.ADRQNPrioritized: environments,
        adrqn.ADRQNHindsight: environments,
        doubleDuelingQNative.DoubleDuelingQNative: singleDimEnvs,
        drqnNative.DRQNNative: singleDimEnvs,
        drqnConvNative.DRQNConvNative: atariEnv.AtariEnv.subEnvs,
        ppoNative.PPONative: singleDimEnvs,
        reinforceNative.ReinforceNative: singleDimEnvs,
        actorCriticNative.ActorCriticNative: singleDimEnvs,
        sarsa: [cartPoleEnvDiscrete.CartPoleEnvDiscrete, frozenLakeEnv.FrozenLakeEnv],
        trpo.TRPO: singleDimEnvs,
        rainbow.Rainbow: singleDimEnvs,
        cem.CEM: environments,
        npg.NPG: environments,
        ddpg.DDPG: environments,
        sac.SAC: environments
    }
    # Re-key the mapping by display name: agent displayName -> [env displayName].
    allowedEnvs = {agent.displayName:[env.displayName for env in envs] for (agent, envs) in allowedEnvs.items()}
    # Invert the mapping: env displayName -> [agent displayName].
    # NOTE(review): this class-body loop leaves `agent`, `envs`, `env` and
    # `curAgents` behind as class attributes, and `agent` shadows the imported
    # module of the same name inside the class namespace.
    allowedAgents = {}
    for agent, envs in allowedEnvs.items():
        for env in envs:
            curAgents = allowedAgents.get(env)
            if not curAgents:
                curAgents = []
                allowedAgents[env] = curAgents
            curAgents.append(agent)
    # NOTE(review): misplaced bare string - it documents __init__'s parameters
    # but sits in the class body, so it has no runtime effect.
    """
    :param master: the top level widget of Tk
    :type master: tkinter.Tk
    :param listener: the listener object that will handle user input
    :type listener: controller.ViewListener
    """
    def __init__(self, listener):
        """Build the themed main window, the project tab area and the menu bar,
        then enter the Tk main loop (this call blocks until the app closes).

        :param listener: controller object that handles all user actions
        """
        self.root = ThemedTk(theme='keramik')
        self.root.resizable(False, False)
        self.root.geometry('1100x1080')
        self.root.configure(bg="gray80")
        self.root.title('EasyRL')
        # self.root.attributes('-fullscreen', True)
        self.listener = listener
        # The project window hosts the tabs; menu entries below delegate to it.
        pw = View.ProjectWindow(self.root, listener)
        self.menubar = tkinter.Menu(self.root)
        # --- File menu ---
        self.mMenuFile = tkinter.Menu(self.menubar, tearoff=0)
        self.mMenuFile.add_command(label="Load Agent", command=pw.loadAgent)
        self.mMenuFile.add_command(label="Load Environment", command=pw.loadEnv)
        # Disabled until at least one tab exists.
        self.mMenuFile.add_command(label="Close Tab", command=pw.closeTab, state=tkinter.DISABLED)
        # Give the project window a handle so it can toggle menu item states.
        pw.mMenuFile = self.mMenuFile
        self.mMenuFile.add_command(label="Reset Tab", command=pw.rechoose)
        self.mMenuFile.add_command(label="Save Model", command=pw.save)
        self.mMenuFile.add_command(label="Load Model", command=pw.load)
        self.mMenuFile.add_command(label="Save Results", command=pw.saveResults)
        self.mMenuFile.add_separator()
        self.mMenuFile.add_command(label="Exit", command=self.delete_window)
        self.menubar.add_cascade(label="File", menu=self.mMenuFile)
        # --- Run menu ---
        self.mMenuRun = tkinter.Menu(self.menubar, tearoff=0)
        self.mMenuRun.add_command(label="Train", command=pw.train)
        self.mMenuRun.add_command(label="Halt", command=pw.halt)
        self.mMenuRun.add_command(label="Test", command=pw.test)
        self.mMenuRun.add_command(label="Reset", command=pw.reset)
        self.menubar.add_cascade(label="Run", menu=self.mMenuRun)
        # --- Help menu ---
        self.mMenuHelp = tkinter.Menu(self.menubar, tearoff=0)
        self.mMenuHelp.add_command(label="Help", command=self.helpMenu)
        self.mMenuHelp.add_command(label="About", command=self.about)
        self.menubar.add_cascade(label="Help", menu=self.mMenuHelp)
        self.root.config(menu=self.menubar)
        center(self.root)
        # Route window-manager close through our shutdown handler.
        self.root.protocol("WM_DELETE_WINDOW", self.delete_window)
        self.root.mainloop()
def about(self):
popup = tkinter.Tk()
popup.wm_title("About")
popup.geometry("1000x1000")
texts = about
sbar = tkinter.Scrollbar(popup)
sbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
text = tkinter.Text(popup, height=1000, width=1000)
text.configure(yscrollcommand=sbar.set)
text.pack(expand=0, fill=tkinter.BOTH)
text.insert(tkinter.END, texts)
sbar.config(command=text.yview)
text.config(state="disabled")
center(popup)
popup.mainloop()
def helpMenu(self):
popup = tkinter.Tk()
popup.wm_title("Help")
popup.geometry("1000x1000")
texts = helptext.getHelpGettingStarted()
sbar = tkinter.Scrollbar(popup)
sbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
text = tkinter.Text(popup, height=1000, width=1000)
text.configure(yscrollcommand=sbar.set)
text.pack(expand=0, fill=tkinter.BOTH)
text.insert(tkinter.END, texts)
sbar.config(command=text.yview)
text.config(state="disabled")
center(popup)
popup.mainloop()
def delete_window(self):
self.listener.haltAll()
try:
self.root.destroy()
except:
pass
    class CreateToolTip(
            object):  # Source: https://stackoverflow.com/questions/3221956/how-do-i-display-tooltips-in-tkinter
        """Attach a delayed hover tooltip to any tkinter widget."""

        def __init__(self, widget, text='widget info'):
            # :param widget: the tkinter widget to decorate
            # :param text: tooltip text to display on hover
            self.waittime = 500  # miliseconds
            self.wraplength = 180  # pixels
            self.widget = widget
            self.text = text
            # Show after a delay on hover; hide on leave or click.
            self.widget.bind("<Enter>", self.enter)
            self.widget.bind("<Leave>", self.leave)
            self.widget.bind("<ButtonPress>", self.leave)
            self.id = None  # pending `after` callback id, if any
            self.tw = None  # the tooltip Toplevel while visible

        def enter(self, event=None):
            # Pointer entered the widget: start the show timer.
            self.schedule()

        def leave(self, event=None):
            # Pointer left (or widget clicked): cancel the timer, hide tooltip.
            self.unschedule()
            self.hidetip()

        def schedule(self):
            # Restart the delayed-show timer.
            self.unschedule()
            self.id = self.widget.after(self.waittime, self.showtip)

        def unschedule(self):
            # Cancel a pending show, if one is queued.
            id = self.id
            self.id = None
            if id:
                self.widget.after_cancel(id)

        def showtip(self, event=None):
            """Create a borderless Toplevel just below the widget and fill it
            with the tooltip label."""
            x = y = 0
            x, y, cx, cy = self.widget.bbox("insert")
            # Offset the tooltip slightly below-right of the widget.
            x += self.widget.winfo_rootx() + 25
            y += self.widget.winfo_rooty() + 20
            # creates a toplevel window
            self.tw = tkinter.Toplevel(self.widget)
            # Leaves only the label and removes the app window
            self.tw.wm_overrideredirect(True)
            self.tw.wm_geometry("+%d+%d" % (x, y))
            label = tkinter.Label(self.tw, text=self.text, justify='left',
                                  background="#ffffff", relief='solid', borderwidth=1,
                                  wraplength=self.wraplength)
            label.pack(ipadx=1)

        def hidetip(self):
            # Destroy the tooltip window if it is currently shown.
            tw = self.tw
            self.tw = None
            if tw:
                tw.destroy()
class Window:
def __init__(self, master, listener):
self.master = master
self.listener = listener
self.frame = ttk.Frame(master)
for i in range(10):
self.frame.grid_columnconfigure(i, minsize=75)
self.frame.grid_rowconfigure(i, minsize=50)
def goBack(self):
self.frame.destroy()
class ProjectWindow(Window):
    """Main workspace view: a toolbar of run-control actions plus a notebook of tabs.

    Each tab hosts an independent View.GeneralTab (one agent/environment pair);
    the trailing '+' tab spawns a new tab when selected (see tabChange).
    """

    def __init__(self, master, listener):
        super().__init__(master, listener)
        self.master = master
        self.listener = listener
        # Monotonic id handed to new tabs; also drives the 'Tab N' labels.
        self.tabIDCounter = 0
        # self.closeTabButton = ttk.Button(self.frame, text='Close Current Tab', command=self.closeTab)
        # self.closeTabButton.grid(row=0, column=0)
        # close_button_ttp = View.CreateToolTip(self.closeTabButton, "Close Current Tab")
        # self.rechooseButton = ttk.Button(self.frame, text='Reset Current Tab', command=self.rechoose)
        # self.rechooseButton.grid(row=0, column=1)
        # reset_button_ttp = View.CreateToolTip(self.rechooseButton, "Reset Current Tab")
        # self.loadEnvButton = ttk.Button(self.frame, text='Load Environment', command=self.loadEnv)
        # self.loadEnvButton.grid(row=0, column=2)
        # load_env_button_ttp = View.CreateToolTip(self.loadEnvButton, "Load Custom Environment")
        # self.loadAgentButton = ttk.Button(self.frame, text='Load Agent', command=self.loadAgent)
        # self.loadAgentButton.grid(row=0, column=3)
        # load_agent_button_ttp = View.CreateToolTip(self.loadAgentButton, "Load Custom Agent")
        # Toolbar row: each button delegates to the currently selected tab.
        tempFrame = tkinter.Frame(self.frame)
        train = tkinter.Button(tempFrame, text='Train', command=self.train)
        train.pack(side='left')
        train_button_ttp = View.CreateToolTip(train, "Train the agent with the current settings")
        halt = tkinter.Button(tempFrame, text='Halt', command=self.halt)
        halt.pack(side='left')
        halt_button_ttp = View.CreateToolTip(halt, "Pause the current training")
        test = tkinter.Button(tempFrame, text='Test', command=self.test)
        test.pack(side='left')
        test_button_ttp = View.CreateToolTip(test, "Test the agent in its current state")
        save = tkinter.Button(tempFrame, text='Save Model', command=self.save)
        save.pack(side='left')
        load = tkinter.Button(tempFrame, text='Load Model', command=self.load)
        load.pack(side='left')
        save_button_ttp = View.CreateToolTip(save, "Save the model in its current state")
        # load = ttk.Button(tempFrame, text='Load Agent', command=self.loadAgent)
        # load.pack(side='left')
        # btnLoadEnv = ttk.Button(tempFrame, text='Load Environment', command=self.loadEnv)
        # btnLoadEnv.pack(side='left')
        load_button_ttp = View.CreateToolTip(load, "Load a model")
        reset = tkinter.Button(tempFrame, text='Reset', command=self.reset)
        reset.pack(side='left')
        reset_button_ttp = View.CreateToolTip(reset, "Reset the current agent and its parameters")
        save_results = tkinter.Button(tempFrame, text='Save Results', command=self.saveResults)
        save_results.pack(side='left')
        save_results_button_ttp = View.CreateToolTip(save_results,
                                                     "Save the results of the current training session")
        tempFrame.grid(row=0, column=0, columnspan=9, sticky=W)
        # Notebook of tabs: real GeneralTabs followed by the '+' placeholder.
        self.tab = ttk.Notebook(self.frame)
        self.tab.bind("<<NotebookTabChanged>>", self.tabChange)
        self.tabs = [View.GeneralTab(self.tab, listener, self.tabIDCounter, self.frame, self.master)]
        for tab in self.tabs:
            self.tab.add(tab, text='Tab ' + str(self.tabIDCounter + 1))
            self.tabIDCounter += 1
        addTab = ttk.Frame(self.tab)
        self.tab.add(addTab, text='+')
        self.tabs.append(addTab)
        self.tab.grid(row=1, column=0, rowspan=9, columnspan=9, sticky='wens')
        self.frame.pack()
        self.frame.lift()
def tabChange(self, event):
    """Notebook tab-change handler: selecting the trailing '+' tab spawns a new tab.

    The '+' placeholder is kept last: the new GeneralTab is inserted just before
    it and immediately selected.
    """
    tabIndex = event.widget.index('current')
    if len(self.tabs) > 1 and tabIndex == len(self.tabs) - 1:
        newTab = View.GeneralTab(self.tab, self.listener, self.tabIDCounter, self.frame, self.master)
        self.tab.forget(self.tabs[-1])
        self.tab.add(newTab, text='Tab ' + str(self.tabIDCounter + 1))
        self.tab.add(self.tabs[-1], text='+')
        self.tabs = self.tabs[:-1] + [newTab] + [self.tabs[-1]]
        self.tab.select(newTab)
        self.tabIDCounter += 1
        # Re-enable the close-tab menu entry now that >1 real tab exists.
        # NOTE(review): self.mMenuFile is not assigned in this class's __init__ —
        # presumably attached externally; confirm before relying on it.
        self.mMenuFile.entryconfig(2, state=tkinter.NORMAL)
def closeTab(self):
    """Close the selected tab, unless it is the last real one.

    self.tabs always ends with the '+' placeholder, so length 2 means only one
    real tab remains and closing is refused.
    """
    if len(self.tabs) != 2:
        tkId = self.tab.select()
        curTab = self.tab.nametowidget(tkId)
        curTab.close()  # let the tab release its backend resources first
        # Linear scan for the widget's position in our bookkeeping list.
        ind = 0
        while self.tabs[ind] != curTab:
            ind += 1
        self.tabs = self.tabs[:ind] + self.tabs[ind + 1:]
        if ind == len(self.tabs) - 1:
            # Closed tab was the last real one: select the new last real tab
            # (never the '+' placeholder).
            self.tab.select(self.tabs[-2])
        self.tab.forget(tkId)
        self.tabIDCounter = self.tabs[-2].tabID + 1
        if len(self.tabs) == 2:
            # Only one real tab left: grey out the close-tab menu entry.
            # NOTE(review): self.mMenuFile is set outside this class — verify.
            self.mMenuFile.entryconfig(2, state=tkinter.DISABLED)
def rechoose(self):
    """Return the selected tab to the model-chooser screen, if it is idle.

    Replaces the parameter frame with a fresh ModelChooser and hides the
    run-time widgets (render canvas, graph, legend, ...) built by setupRight.
    """
    tkId = self.tab.select()
    curTab = self.tab.nametowidget(tkId)
    if not curTab.listener.modelIsRunning(curTab.tabID):
        curTab.parameterFrame.destroy()
        curTab.parameterFrame = View.GeneralTab.ModelChooser(curTab)
        curTab.parameterFrame.grid(row=0, column=0, rowspan=9)
        curTab.slowLabel.grid_forget()
        curTab.slowSlider.grid_forget()
        curTab.render.grid_forget()
        curTab.displayedEpisodeNum.grid_forget()
        curTab.curEpisodeNum.grid_forget()
        curTab.graph.grid_forget()
        # curTab.graphLine.grid_forget()
        curTab.xAxisLabel.grid_forget()
        curTab.legend.grid_forget()
        # curTab.space.grid_forget()
def train(self):
    """Toolbar action: start training in the currently selected tab."""
    selected = self.tab.nametowidget(self.tab.select())
    # Only act when the tab is idle and past the model-chooser screen.
    if not selected.listener.modelIsRunning(selected.tabID) and selected.parameterFrame.isParameterFrame:
        selected.parameterFrame.master.train()
def halt(self):
    """Toolbar action: pause whatever the selected tab is running."""
    selected = self.tab.nametowidget(self.tab.select())
    # Works even mid-run, so no modelIsRunning guard here.
    if selected.parameterFrame.isParameterFrame:
        selected.parameterFrame.master.halt()
def test(self):
    """Toolbar action: run evaluation in the currently selected tab."""
    selected = self.tab.nametowidget(self.tab.select())
    if not selected.listener.modelIsRunning(selected.tabID) and selected.parameterFrame.isParameterFrame:
        selected.parameterFrame.master.test()
def save(self):
    """Toolbar action: save the selected tab's model to disk."""
    selected = self.tab.nametowidget(self.tab.select())
    if not selected.listener.modelIsRunning(selected.tabID) and selected.parameterFrame.isParameterFrame:
        selected.parameterFrame.master.save()
def load(self):
    """Toolbar action: load a saved model into the selected tab."""
    selected = self.tab.nametowidget(self.tab.select())
    if not selected.listener.modelIsRunning(selected.tabID) and selected.parameterFrame.isParameterFrame:
        selected.parameterFrame.master.load()
def reset(self):
    """Toolbar action: reset the selected tab's agent and parameters."""
    selected = self.tab.nametowidget(self.tab.select())
    if not selected.listener.modelIsRunning(selected.tabID) and selected.parameterFrame.isParameterFrame:
        selected.parameterFrame.master.reset()
def saveResults(self):
    """Toolbar action: export the selected tab's training results."""
    selected = self.tab.nametowidget(self.tab.select())
    if not selected.listener.modelIsRunning(selected.tabID) and selected.parameterFrame.isParameterFrame:
        selected.parameterFrame.master.saveResults()
def loadEnv(self):
    """Load a custom environment class from a user-chosen Python file.

    The file must define a `CustomEnv` class. On success it is prepended to
    View.environments and every tab still showing the model chooser is rebuilt
    so the new environment appears in its list. Failures (cancelled dialog,
    unreadable or malformed file) are ignored best-effort.
    """
    filename = filedialog.askopenfilename(initialdir="/", title="Select file")
    spec = importlib.util.spec_from_file_location("customenv", filename)
    try:
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        tkId = self.tab.select()
        curTab = self.tab.nametowidget(tkId)
        if not curTab.listener.modelIsRunning(curTab.tabID):
            View.environments = [mod.CustomEnv] + View.environments
            # Rebuild any tab still on the chooser screen so it lists the new env.
            for tab in self.tabs:
                if isinstance(tab, View.GeneralTab) and isinstance(tab.parameterFrame,
                                                                   View.GeneralTab.ModelChooser):
                    tab.parameterFrame.destroy()
                    tab.parameterFrame = View.GeneralTab.ModelChooser(tab)
                    tab.parameterFrame.grid(row=0, column=0, rowspan=9)
    except Exception:
        # Best-effort load: bad files are silently ignored. Narrowed from a
        # bare `except:` so SystemExit/KeyboardInterrupt propagate normally.
        pass
def loadAgent(self):
    """Load a custom agent class from a user-chosen Python file.

    The file must define a `CustomAgent` class. On success it is prepended to
    View.agents and every tab still showing the model chooser is rebuilt so the
    new agent appears in its list. Failures are ignored best-effort.
    """
    filename = filedialog.askopenfilename(initialdir="/", title="Select file")
    spec = importlib.util.spec_from_file_location("customagent", filename)
    try:
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        tkId = self.tab.select()
        curTab = self.tab.nametowidget(tkId)
        if not curTab.listener.modelIsRunning(curTab.tabID):
            View.agents = [mod.CustomAgent] + View.agents
            # Rebuild any tab still on the chooser screen so it lists the new agent.
            for tab in self.tabs:
                if isinstance(tab, View.GeneralTab) and isinstance(tab.parameterFrame,
                                                                   View.GeneralTab.ModelChooser):
                    tab.parameterFrame.destroy()
                    tab.parameterFrame = View.GeneralTab.ModelChooser(tab)
                    tab.parameterFrame.grid(row=0, column=0, rowspan=9)
    except Exception:
        # Best-effort load: bad files are silently ignored. Narrowed from a
        # bare `except:` so SystemExit/KeyboardInterrupt propagate normally.
        pass
class GeneralTab(ttk.Frame):
    """One notebook tab: model chooser / parameter panel on the left, episode
    render, training graph and legend on the right.

    Talks to the backend through `listener`, keyed by `tabID`.
    """

    def __init__(self, tab, listener, tabID, frame, master):
        super().__init__(tab)
        self.root = master  # top-level Tk root (used for cursor changes)
        self.frame = frame  # parent ProjectWindow frame
        self.tabID = tabID  # backend key identifying this tab's model
        self.image = None  # PhotoImage shown on the render canvas (kept to defeat GC)
        # Double-buffered frame queues: one fills with incoming frames while the
        # other is being played back; imageQueuesInd marks the FILL queue.
        self.imageQueues = ([], [])
        self.imageQueuesInd = 0
        self.curImageIndDisplayed = 0  # playback cursor into the display queue
        self.isDisplayingEpisode = False
        self.waitCount = 0  # ticks since last displayed frame (speed throttle)
        self.renderImage = None  # canvas item id of the displayed image
        self.trainingEpisodes = 0  # episodes completed in the current run
        self.prevDisplayedEpisode = None
        self.curTotalEpisodes = None  # episode budget of the current run
        self.graphDataPoints = []  # one (avgLoss, totalReward, avgEpsilon) per episode
        self.smoothedDataPoints = []  # rolling means over smoothAmt episodes
        self.curLossAccum = 0  # rolling-window accumulators used for smoothing
        self.curRewardAccum = 0
        # Per-episode accumulators, folded into graphDataPoints by addEpisodeToGraph.
        self.curEpisodeSteps = 0
        self.episodeAccLoss = 0
        self.episodeAccReward = 0
        self.episodeAccEpsilon = 0
        self.smoothAmt = 20  # smoothing window (20 while training, 1 while testing)
        self.rewardGraphMin = 0  # plot scaling bounds, recomputed on full redraws
        self.rewardGraphMax = 100
        self.lossGraphMax = 100
        self.graphBottomMargin = 50  # px reserved below the reward curve for the x axis
        self.listener = listener
        # frame = tkinter.Frame(self)
        # frame.grid(row=0, column=0, columnspan=2)
        # ttk.Label(frame, text='Number of Episodes', width=18, anchor='w').pack(side="left", padx=(5,0), pady=10)
        # ttkwidgets.tickscale.TickScale(self, from_=1, to=655360, resolution=1, orient=tkinter.HORIZONTAL)
        # self.numEpsVar = tkinter.StringVar()
        # self.numEps = ttk.Entry(frame, textvariable=self.numEpsVar).pack(side="left", padx=(195,0))
        # # numEps_ttp = View.CreateToolTip(self.numEps, "The number of episodes to run the model on")
        # self.numEpsVar.set('1000')
        #
        # frame2 = tkinter.Frame(self)
        # frame2.grid(row=1, column=0, columnspan=2)
        # ttk.Label(frame2, text='Max Steps', width=18, anchor='w').pack(side="left", padx=(5,0), pady=10)
        # ttkwidgets.tickscale.TickScale(self, from_=1, to=655360, resolution=1, orient=tkinter.HORIZONTAL)
        # self.maxStepsVar = tkinter.StringVar()
        # self.maxSteps = ttk.Entry(frame2, textvariable=self.maxStepsVar).pack(side="left", padx=(195,0))
        # # maxSteps_ttp = View.CreateToolTip(self.maxSteps, "The max number of timesteps permitted in an episode")
        # self.maxStepsVar.set('200')
        # Add model parameters here
        # Start on the model chooser; selectModel() swaps in a ParameterFrame
        # and builds the run-time widgets via setupRight().
        self.parameterFrame = self.ModelChooser(self)
        self.parameterFrame.grid(row=0, column=0, rowspan=9)
        # self.slowLabel = ttk.Label(self, text='Displayed episode speed')
        # self.slowLabel.grid(row=7, column=0)
        # self.slowSlider = ttkwidgets.tickscale.TickScale(self, from_=1, to=20, resolution=1, orient=tkinter.HORIZONTAL)
        # slowSlider_ttp = View.CreateToolTip(self.slowSlider, "The speed at which to display the episodes")
        # self.slowSlider.set(10)
        # self.slowSlider.grid(row=7, column=1)
        #
        # self.render = tkinter.Canvas(self, background='#eff0f1')
        # self.render.grid(row=0, column=2, rowspan=9, columnspan=8, sticky='wens')
        #
        # self.displayedEpisodeNum = ttk.Label(self, text='')
        # self.displayedEpisodeNum.grid(row=9, column=2)
        #
        # self.curEpisodeNum = ttk.Label(self, text='')
        # self.curEpisodeNum.grid(row=9, column=3)
        #
        # self.graph = tkinter.Canvas(self, background='#eff0f1')
        # self.graph.grid(row=10, column=2, rowspan=4, columnspan=8, sticky='wens')
        # self.graphLine = self.graph.create_line(0,0,0,0, fill='black')
        # self.graph.bind("<Motion>", self.updateGraphLine)
        # self.drawAxis()
        #
        # self.legend = tkinter.Canvas(self, background='#eff0f1')
        # self.legend.grid(row=10, column=0, rowspan=4, columnspan=2, sticky='wens')
        # self.legend.bind('<Configure>', self.legendResize)
def legendResize(self, evt):
    """Redraw the legend canvas (color key plus live stat slots) after a resize."""
    self.legend.delete('all')
    h = evt.height
    # Five evenly spaced rows plus a sixth tucked near the bottom edge.
    p1, p2, p3, p4, p5 = (h * k / 6 for k in range(1, 6))
    p6 = 9 * h / 10
    for rowY, lineColor in ((p1, 'blue'), (p2, 'red'), (p3, 'green')):
        self.legend.create_line(40, rowY, 90, rowY, fill=lineColor)
    self.lossLegend = self.legend.create_text(100, p1, text='MSE Episode Loss:', anchor='w')
    self.rewardLegend = self.legend.create_text(100, p2, text='Episode Reward:', anchor='w')
    self.epsilonLegend = self.legend.create_text(100, p3, text='Epsilon:', anchor='w')
    self.episodelegend = self.legend.create_text(100, p4, text='Episode:', anchor='w')
    self.testResult1 = self.legend.create_text(100, p5, text='', anchor='w')
    self.testResult2 = self.legend.create_text(100, p6, text='', anchor='w')
def updateGraphLine(self, evt):
    """Mouse-motion handler over the graph: move the vertical cursor line and
    show the smoothed stats for the episode under the cursor in the legend."""
    cursorX = evt.x
    self.graph.coords(self.graphLine, [cursorX, 0, cursorX, self.graph.winfo_height()])
    if not self.curTotalEpisodes:
        return
    # Map pixel x back to an episode index, shifted by the smoothing window.
    smoothIndex = int(self.curTotalEpisodes * cursorX / self.graph.winfo_width()) - self.smoothAmt
    if 0 <= smoothIndex < len(self.smoothedDataPoints):
        loss, reward, epsilon = self.smoothedDataPoints[smoothIndex]
        self.legend.itemconfig(self.lossLegend, text='MSE Episode Loss: {:.4f}'.format(loss))
        self.legend.itemconfig(self.rewardLegend, text='Episode Reward: ' + str(reward))
        self.legend.itemconfig(self.epsilonLegend, text='Epsilon: {:.4f}'.format(epsilon))
        self.legend.itemconfig(self.episodelegend, text='Episode: ' + str(smoothIndex + self.smoothAmt))
    else:
        # Cursor is outside the plotted range: blank the values.
        self.legend.itemconfig(self.lossLegend, text='MSE Episode Loss:')
        self.legend.itemconfig(self.rewardLegend, text='Episode Reward:')
        self.legend.itemconfig(self.epsilonLegend, text='Epsilon:')
        self.legend.itemconfig(self.episodelegend, text='Episode:')
def halt(self):
    """Stop this tab's model and drop any episode frames queued for display."""
    self.listener.halt(self.tabID)
    for frameQueue in self.imageQueues:
        frameQueue.clear()
    self.imageQueuesInd = 0
    self.curImageIndDisplayed = 0
    self.isDisplayingEpisode = False
    self.waitCount = 0
def setupRight(self):
    """Build the run-time widgets (speed slider, render canvas, counters,
    graph, x-axis strip, legend) once a model has been selected."""
    # self.parameterFrame.grid_forget()
    self.slowLabel = ttk.Label(self, text='Displayed episode speed')
    self.slowLabel.grid(row=4, column=1)
    # Playback speed: higher value = fewer skipped ticks in updateEpisodeRender.
    self.slowSlider = ttkwidgets.tickscale.TickScale(self, from_=1, to=20, resolution=1,
                                                     orient=tkinter.HORIZONTAL)
    slowSlider_ttp = View.CreateToolTip(self.slowSlider, "The speed at which to display the episodes")
    self.slowSlider.set(10)
    self.slowSlider.grid(row=5, column=1, sticky="news")
    # Canvas where episode frames are played back.
    self.render = tkinter.Canvas(self, bg="gray80", highlightbackground="gray80")
    self.render.grid(row=4, column=2, rowspan=6, columnspan=2, sticky='wens')
    # tkinter.Canvas(self, height=15,bg="gray80").grid(row=2, column=1, rowspan=1, columnspan=1, sticky='wens')
    self.displayedEpisodeNum = ttk.Label(self, text='Showing episode')
    self.displayedEpisodeNum.grid(row=7, column=1)
    self.curEpisodeNum = ttk.Label(self, text='Episodes completed:')
    self.curEpisodeNum.grid(row=8, column=1)
    # Loss/reward/epsilon plot, with a vertical cursor line driven by the mouse.
    self.graph = tkinter.Canvas(self, bg="gray80", highlightbackground="gray80")
    self.graph.grid(row=0, column=2, rowspan=2, columnspan=1, sticky='wens')
    self.graphLine = self.graph.create_line(0, 0, 0, 0, fill='black')
    self.graph.bind("<Motion>", self.updateGraphLine)
    self.xAxisLabel = tkinter.Canvas(self, height=15, bg="gray80", highlightbackground="gray80")
    self.xAxisLabel.grid(row=2, column=2, rowspan=1, columnspan=1, sticky='wens')
    # self.drawAxis()
    # background='#eff0f1'
    self.legend = tkinter.Canvas(self, bg="gray80", width=275, highlightbackground="gray80")
    self.legend.grid(row=0, column=1, sticky='news')
    self.legend.bind('<Configure>', self.legendResize)
    self.space = ttk.Label(self, text="   ").grid(row=3, column=2)
    # self.columnconfigure(0, weight=1)
    # self.columnconfigure(1, weight=5)
    self.frame.pack()
def busy(self):
    """Show a busy cursor while a run starts up. Currently a no-op."""
    pass
    #Commented out because this crashes on Linux:
    #self.root.config(cursor="wait")
def notbusy(self):
    """Restore the default cursor after a busy() call."""
    self.root.config(cursor="")
def loadingRender(self):
    """Blank the render canvas and show a centered 'Loading...' placeholder."""
    self.isLoadingRender = True
    self.render.delete('all')
    width, height = self.render.winfo_width(), self.render.winfo_height()
    self.render.create_text(width / 2, height / 2, text='Loading...', anchor='center')
def train(self):
    """Kick off a training run with the parameters currently set in this tab.

    No-op while a model is already running. A ValueError from bad
    hyperparameter values is reported to the console rather than raised.
    """
    self.busy()
    self.loadingRender()
    if not self.listener.modelIsRunning(self.tabID):
        self.smoothAmt = 20  # training plots are smoothed over 20 episodes
        try:
            # Hoisted: the original fetched getParameters() twice.
            parameters = self.parameterFrame.getParameters()
            self.listener.startTraining(self.tabID, parameters)
            self.trainingEpisodes = 0
            # First parameter is 'Number of Episodes' (see ParameterFrame).
            self.curTotalEpisodes = parameters[0]
            self.resetGraph()
            self.checkMessages()
            self.legend.itemconfig(self.testResult1, text='')
            self.legend.itemconfig(self.testResult2, text='')
        except ValueError:
            print('Bad Hyperparameters')
def test(self):
    """Run the trained model in evaluation mode with the current parameters.

    Shows an error dialog when the model has not been trained yet. A
    ValueError from bad hyperparameter values is reported to the console.
    """
    self.busy()
    self.loadingRender()
    if not self.listener.modelIsRunning(self.tabID):
        self.smoothAmt = 1  # no smoothing while testing: plot raw per-episode values
        try:
            # Hoisted: the original fetched getParameters() twice.
            parameters = self.parameterFrame.getParameters()
            if not self.listener.startTesting(self.tabID, parameters):
                self.notbusy()
                self.render.delete('all')
                tkinter.messagebox.showerror(title="Error", message="Model has not been trained!")
            # Falls through even on failure (matching the original flow), so
            # the graph/counters are reset either way.
            self.trainingEpisodes = 0
            self.curTotalEpisodes = parameters[0]
            self.resetGraph()
            self.checkMessages()
            self.legend.itemconfig(self.testResult1, text='')
            self.legend.itemconfig(self.testResult2, text='')
        except ValueError:
            print('Bad Hyperparameters')
def save(self):
    """Prompt for a path and save the model there; disabled while running."""
    if self.listener.modelIsRunning(self.tabID):
        return
    filename = filedialog.asksaveasfilename(initialdir="/", title="Select file")
    if filename:
        self.listener.save(filename, self.tabID)
def load(self):
    """Prompt for a path and load a model from it; disabled while running.

    A cancelled dialog (empty filename) is ignored, matching save().
    """
    if not self.listener.modelIsRunning(self.tabID):
        filename = filedialog.askopenfilename(initialdir="/", title="Select file")
        # Guard added: cancelling the dialog returns '' — don't try to load that.
        if filename:
            self.listener.load(filename, self.tabID)
def saveResults(self):
    """Dump per-episode (loss, reward, epsilon) rows to a user-chosen CSV file."""
    filename = filedialog.asksaveasfilename(initialdir="/", title="Select file")
    if not filename:
        return  # dialog cancelled — the original would have tried open('')
    # `with` guarantees the handle is closed even if a write fails.
    with open(filename, "w") as file:
        file.write("episode, loss, reward, epsilon\n")
        for episode, (loss, reward, epsilon) in enumerate(self.graphDataPoints):
            file.write(str(episode) + "," + str(loss) + "," + str(reward) + "," + str(epsilon) + "\n")
def reset(self):
    """Re-initialize this tab's agent, unless a model is currently running."""
    if self.listener.modelIsRunning(self.tabID):
        return
    self.listener.reset(self.tabID)
def resetGraph(self):
    """Clear all plot data and accumulators, then redraw an empty graph."""
    self.graphDataPoints.clear()
    self.smoothedDataPoints.clear()
    self.curLossAccum = 0
    self.curRewardAccum = 0
    self.curEpisodeSteps = 0
    self.episodeAccLoss = 0
    self.episodeAccReward = 0
    self.episodeAccEpsilon = 0
    self.graph.delete('all')
    # self.drawAxis()
    # Recreate the cursor line since delete('all') removed it.
    self.graphLine = self.graph.create_line(0, 0, 0, 0, fill='black')
    self.redrawGraphXAxis()
    self.drawAxis()
def checkMessages(self):
    """Drain the backend message queue and apply each message; reschedules itself.

    EVENT/EPISODE closes out the current episode's stats and swaps the frame
    buffers; EVENT/TRAIN_FINISHED and EVENT/TEST_FINISHED write run totals to
    the legend and stop the polling loop (early return); STATE accumulates
    stats and buffers a render frame. While a run is live this method re-arms
    itself every 10 ms via master.after.
    """
    if self.trainingEpisodes >= 1:
        # First episode is done: drop the busy cursor set by train()/test().
        self.notbusy()
    while self.listener.getQueue(self.tabID).qsize():
        message = self.listener.getQueue(self.tabID).get(timeout=0)
        if message.type == Model.Message.EVENT:
            if message.data == Model.Message.EPISODE:
                self.addEpisodeToGraph()
                self.trainingEpisodes += 1
                self.curEpisodeNum.configure(text='Episodes completed: ' + str(self.trainingEpisodes))
                if self.isDisplayingEpisode:
                    # Still replaying an older episode: drop the fill queue's frames.
                    self.imageQueues[self.imageQueuesInd].clear()
                else:
                    # Swap buffers and start showing the episode that just finished.
                    self.imageQueuesInd = 1 - self.imageQueuesInd
                    self.imageQueues[self.imageQueuesInd].clear()
                    self.isDisplayingEpisode = True
                    self.curImageIndDisplayed = 0
                    self.displayedEpisodeNum.configure(text='Showing episode ' + str(self.trainingEpisodes))
            elif message.data == Model.Message.TRAIN_FINISHED:
                self.imageQueues[0].clear()
                self.imageQueues[1].clear()
                self.imageQueuesInd = 0
                self.curImageIndDisplayed = 0
                self.isDisplayingEpisode = False
                self.waitCount = 0
                # NOTE(review): divides by len(graphDataPoints) — a run that
                # finishes with zero recorded episodes would raise here.
                totalReward = sum([reward for _, reward, _ in self.graphDataPoints])
                avgReward = totalReward / len(self.graphDataPoints)
                self.legend.itemconfig(self.testResult1, text='Total Training Reward: ' + str(totalReward))
                self.legend.itemconfig(self.testResult2, text='Reward/Episode: ' + str(avgReward))
                self.loadingRenderUpdate()
                return  # run is over: stop polling
            elif message.data == Model.Message.TEST_FINISHED:
                self.imageQueues[0].clear()
                self.imageQueues[1].clear()
                self.imageQueuesInd = 0
                self.curImageIndDisplayed = 0
                self.isDisplayingEpisode = False
                self.waitCount = 0
                totalReward = sum([reward for _, reward, _ in self.graphDataPoints])
                avgReward = totalReward / len(self.graphDataPoints)
                self.legend.itemconfig(self.testResult1, text='Total Test Reward: ' + str(totalReward))
                self.legend.itemconfig(self.testResult2, text='Reward/Episode: ' + str(avgReward))
                self.loadingRenderUpdate()
                return  # run is over: stop polling
        elif message.type == Model.Message.STATE:
            # Buffer the rendered frame and fold the step stats into this episode.
            self.imageQueues[self.imageQueuesInd].append(message.data.image)
            self.accumulateState(message.data)
    self.updateEpisodeRender()
    self.master.after(10, self.checkMessages)
def loadingRenderUpdate(self):
    """Clear the 'Loading...' placeholder once a run has finished, if still shown."""
    if self.isLoadingRender:
        self.notbusy()
        self.render.delete('all')
def addEpisodeToGraph(self):
    """Close out the per-episode accumulators into one graph data point.

    Appends (avgLoss, totalReward, avgEpsilon), redraws the graph (a full
    redraw every max(5, smoothAmt) episodes, incremental otherwise), then
    zeroes the accumulators for the next episode.
    """
    # Guard against an episode that produced no STATE messages, which would
    # otherwise divide by zero (averages then come out as 0).
    steps = self.curEpisodeSteps if self.curEpisodeSteps else 1
    avgLoss = self.episodeAccLoss / steps
    totalReward = self.episodeAccReward
    avgEpsilon = self.episodeAccEpsilon / steps
    self.graphDataPoints.append((avgLoss, totalReward, avgEpsilon))
    self.redrawGraph(len(self.graphDataPoints) % max(5, self.smoothAmt) == 0)
    self.curEpisodeSteps = 0
    self.episodeAccLoss = 0
    self.episodeAccReward = 0
    self.episodeAccEpsilon = 0
def redrawGraphXAxis(self):
    """Draw episode-number tick marks along the bottom of the graph.

    Picks a tick step by alternately multiplying by 5 and 2 (1, 5, 10, 50,
    100, ...) until at most ~13 labels fit, whatever the episode budget.
    """
    w = self.graph.winfo_width()
    h = self.graph.winfo_height()
    step = 1
    while self.curTotalEpisodes // step > 13:
        step *= 5
        if self.curTotalEpisodes // step <= 13:
            break
        step *= 2
    for ind in range(0, int(self.curTotalEpisodes), step):
        # Episode index -> pixel x, proportional to the run's episode budget.
        x = w * (ind / self.curTotalEpisodes)
        self.graph.create_line(x, h - self.graphBottomMargin, x, h - self.graphBottomMargin / 2)
        self.graph.create_text(x, h - self.graphBottomMargin / 2, text=str(ind), anchor='n')
def redrawGraph(self, full):
    """Plot epsilon (green), smoothed reward (red) and smoothed loss (blue).

    When `full` is true, everything is recomputed and redrawn: y-axis bounds
    are re-derived from the 5th/95th percentiles of the recorded data and the
    smoothing accumulators are rebuilt from scratch. Otherwise only the newest
    data point's segments are appended to the existing canvas.
    """
    if full:
        lastN = len(self.graphDataPoints)
        self.curLossAccum = 0
        self.curRewardAccum = 0
        self.smoothedDataPoints.clear()
        # Loss scale: 95th percentile plus 10% headroom (floored above zero).
        self.lossGraphMax = max(0.0000000000001, sorted([loss for loss, _, _ in self.graphDataPoints])[
            int((len(self.graphDataPoints) - 1) * 0.95)] * 1.1)
        # Reward scale: 5th..95th percentile band, extended 10% on each side.
        rewardSorted = sorted([reward for _, reward, _ in self.graphDataPoints])
        self.rewardGraphMax = rewardSorted[int((len(self.graphDataPoints) - 1) * 0.95)]
        self.rewardGraphMin = rewardSorted[int((len(self.graphDataPoints) - 1) * 0.05)]
        extendAmt = 0.1 * (self.rewardGraphMax - self.rewardGraphMin)
        self.rewardGraphMax += extendAmt
        self.rewardGraphMin -= extendAmt
        print('loss graph max:', self.lossGraphMax)
        print('reward graph min/max:', self.rewardGraphMin, self.rewardGraphMax)
        self.graph.delete('all')
        self.redrawGraphXAxis()
        self.graphLine = self.graph.create_line(0, 0, 0, 0, fill='black')
    else:
        lastN = 1  # incremental: draw only the newest point
    w = self.graph.winfo_width()
    h = self.graph.winfo_height()
    offset = len(self.graphDataPoints) - lastN
    for ind in range(max(0, offset), len(self.graphDataPoints)):
        oldX = w * (ind / self.curTotalEpisodes)
        newX = w * ((ind + 1) / self.curTotalEpisodes)
        avgLoss, totalReward, avgEpsilon = self.graphDataPoints[ind]
        if ind > 0:
            # Epsilon is plotted raw on a 0..1 scale (green).
            _, _, prevEpsilon = self.graphDataPoints[ind - 1]
            oldY = h * (1 - prevEpsilon)
            newY = h * (1 - avgEpsilon)
            self.graph.create_line(oldX, oldY, newX, newY, fill='green')
        if ind >= self.smoothAmt:
            # Rolling window: drop the value leaving the window, add the new one.
            prevLoss, prevReward = self.curLossAccum / self.smoothAmt, self.curRewardAccum / self.smoothAmt
            (obsLoss, obsReward, _) = self.graphDataPoints[ind - self.smoothAmt]
            self.curLossAccum -= obsLoss
            self.curRewardAccum -= obsReward
            self.curLossAccum += avgLoss
            self.curRewardAccum += totalReward
            curReward = self.curRewardAccum / self.smoothAmt
            curLoss = self.curLossAccum / self.smoothAmt
            self.smoothedDataPoints.append((curLoss, curReward, avgEpsilon))
            # Smoothed reward (red), scaled into the band above the x-axis margin.
            rewardRange = max(0.000000001, self.rewardGraphMax - self.rewardGraphMin)
            oldY = (self.graphBottomMargin + (h - self.graphBottomMargin) * (
                    1 - (prevReward - self.rewardGraphMin) / rewardRange)) - 4
            newY = (self.graphBottomMargin + (h - self.graphBottomMargin) * (
                    1 - (curReward - self.rewardGraphMin) / rewardRange)) - 4
            self.graph.create_line(oldX, oldY, newX, newY, fill='red')
            # Smoothed loss (blue), scaled against lossGraphMax.
            oldY = h * (1 - prevLoss / self.lossGraphMax)
            newY = h * (1 - curLoss / self.lossGraphMax)
            self.graph.create_line(oldX, oldY, newX, newY, fill='blue')
        else:
            # Window not yet full: just accumulate, nothing smoothed to draw.
            self.curLossAccum += avgLoss
            self.curRewardAccum += totalReward
    self.drawAxis()
def drawAxis(self):
    """Draw the y axis (left edge), x axis (bottom edge) and x-axis caption."""
    graphHeight = self.graph.winfo_height()
    graphWidth = self.graph.winfo_width()
    self.graph.create_line(2, 0, 2, graphHeight, fill='black')
    self.graph.create_line(0, graphHeight - 3, graphWidth, graphHeight - 3, fill='black')
    labelWidth = self.xAxisLabel.winfo_width()
    labelHeight = self.xAxisLabel.winfo_height()
    self.xAxisLabel.create_text(int(labelWidth / 2), int(labelHeight / 2),
                                text='Timestamp', anchor='center')
def accumulateState(self, state):
if state.epsilon:
self.episodeAccEpsilon += state.epsilon
if state.reward:
self.episodeAccReward += state.reward
if state.loss:
self.episodeAccLoss += state.loss
self.curEpisodeSteps += 1
def updateEpisodeRender(self):
    """Show the next frame of the most recently completed episode.

    Frames are read from the queue NOT currently being filled; the slow
    slider throttles playback (higher value => fewer skipped ticks).
    """
    displayQueue = self.imageQueues[1 - self.imageQueuesInd]
    if displayQueue:
        if self.waitCount >= 21 - self.slowSlider.get():
            self.waitCount = 0
            tempImage = displayQueue[self.curImageIndDisplayed]
            self.curImageIndDisplayed = self.curImageIndDisplayed + 1
            if self.curImageIndDisplayed == len(displayQueue):
                # Wrapped: this episode has finished playing back.
                self.curImageIndDisplayed = 0
                self.isDisplayingEpisode = False
            if tempImage:
                # Stretch the frame to fill the render canvas.
                tempImage = tempImage.resize((self.render.winfo_width(), self.render.winfo_height()))
                self.image = ImageTk.PhotoImage(
                    tempImage)  # must maintain a reference to this image in self: otherwise will be garbage collected
                if self.renderImage:
                    self.render.delete(self.renderImage)
                self.isLoadingRender = False
                self.renderImage = self.render.create_image(0, 0, anchor='nw', image=self.image)
        self.waitCount += 1
def selectModel(self):
    """Resolve the chosen agent/environment names, swap in a ParameterFrame,
    and build the run-time UI; shows an error when either choice is missing."""
    chosenAgent = next(
        (candidate for candidate in View.agents
         if self.parameterFrame.agentOpts.get() == candidate.displayName), None)
    chosenEnv = next(
        (candidate for candidate in View.environments
         if self.parameterFrame.envOpts.get() == candidate.displayName), None)
    if chosenAgent and chosenEnv:
        # Retitle the tab after the selected pair, then replace the chooser.
        self.master.tab(self, text=chosenAgent.displayName + '+' + chosenEnv.displayName)
        self.parameterFrame.destroy()
        self.parameterFrame = self.ParameterFrame(self, chosenAgent, chosenEnv)
        self.parameterFrame.grid(row=0, column=0, rowspan=9)
        self.setupRight()
    else:
        messagebox.showerror("Error", "Please select both an agent and an environment")
def close(self):
    """Release this tab's backend resources via the listener."""
    self.listener.close(self.tabID)
class ParameterFrame(ttk.Frame):
    """Left-hand panel of linked slider+entry rows for the model's hyperparameters.

    Built once an agent/environment pair is chosen; getParameters() returns the
    slider values in creation order (episodes, max steps, then agent-specific).
    """

    def __init__(self, master, agentClass, envClass):
        super().__init__(master)
        self.isParameterFrame = True  # distinguishes this panel from ModelChooser
        self.master = master
        master.listener.setAgent(master.tabID, agentClass)
        master.listener.setEnvironment(master.tabID, envClass)
        # Scales, in creation order; read back by getParameters().
        self.values = []
        # Two built-in parameters every run has, then the agent's own.
        # NOTE(review): `agent` here is a module imported at file level — confirm.
        self.createParameterChooser(
            agent.Agent.Parameter('Number of Episodes', 1, 655360, 1, 1000, True, True,
                                  "The number of episodes to run the model on"))
        self.createParameterChooser(
            agent.Agent.Parameter('Max Size', 1, 655360, 1, 200, True, True,
                                  "The max number of timesteps permitted in an episode"))
        for param in agentClass.parameters:
            self.createParameterChooser(param)
        # train = ttk.Button(self, text='Train', command=self.master.train)
        # train.pack(side='left')
        # train_button_ttp = View.CreateToolTip(train, "Train the agent with the current settings")
        # halt = ttk.Button(self, text='Halt', command=self.master.halt)
        # halt.pack(side='left')
        # halt_button_ttp = View.CreateToolTip(halt, "Pause the current training")
        # test = ttk.Button(self, text='Test', command=self.master.test)
        # test.pack(side='left')
        # test_button_ttp = View.CreateToolTip(test, "Test the agent in its current state")
        # save = ttk.Button(self, text='Save Agent', command=self.master.save)
        # save.pack(side='left')
        # save_button_ttp = View.CreateToolTip(save, "Save the agent in its current state")
        # load = ttk.Button(self, text='Load Agent', command=self.master.load)
        # load.pack(side='left')
        # load_button_ttp = View.CreateToolTip(load, "Load an agent")
        # reset = ttk.Button(self, text='Reset', command=self.master.reset)
        # reset.pack(side='left')
        # reset_button_ttp = View.CreateToolTip(reset, "Reset the current agent and its parameters")
        # save_results = ttk.Button(self, text='Save Results', command=self.master.saveResults)
        # save_results.pack(side='left')
        # save_results_button_ttp = View.CreateToolTip(save_results, "Save the results of the current training session")

    def createParameterChooser(self, param):
        """Build one labelled slider+entry row; the two widgets mirror each other.

        The focus checks below prevent feedback loops: each side only updates
        the other when it is NOT the widget the user is editing.
        """
        subFrame = ttk.Frame(self)
        ttk.Label(subFrame, text=param.name, width=18).pack(side="left", expand=True, fill='both', padx=5,
                                                            pady=5)
        valVar = tkinter.StringVar()
        # `input` (shadows the builtin) is None until the Entry exists; the
        # closures below capture it late, so they see the real widget later.
        input = None

        def scaleChanged(val):
            # Push slider moves into the entry, unless the entry has focus.
            if subFrame.focus_get() != input:
                valVar.set(val)

        scale = ttkwidgets.tickscale.TickScale(subFrame, from_=param.min, to=param.max,
                                               resolution=param.resolution,
                                               orient=tkinter.HORIZONTAL, command=scaleChanged, length=170)
        View.CreateToolTip(scale, param.toolTipText)
        scale.set(param.default)
        scale.pack(side="left", expand=True, fill='both', padx=5, pady=5)

        def entryChanged(var, indx, mode):
            # Push typed values back into the slider; ignore partial/bad input.
            try:
                if subFrame.focus_get() == input:
                    scale.set(float(valVar.get()))
            except ValueError:
                pass

        valVar.trace_add('write', entryChanged)
        input = ttk.Entry(subFrame, textvariable=valVar)
        valVar.set(str(param.default))
        input.pack(side="right", expand=True, padx=5, pady=5)
        subFrame.pack(side='top')
        self.values.append(scale)

    def getParameters(self):
        """Return the current slider values, in the order the rows were created."""
        return [value.get() for value in self.values]
class ModelChooser(ttk.Frame):
def __init__(self, master):
super().__init__(master)
self.isParameterFrame = False
self.agentOpts = tkinter.StringVar(self)
self.envOpts = tkinter.StringVar(self)
self.envButtons = []
self.agentButtons = []
subFrame = ttk.Frame(self)
envName = [opt.displayName for opt in View.environments]
agtName = [opt.displayName for opt in View.agents]
# ttk.Combobox(subFrame, state='readonly', values=agtName, textvariable = self.agentOpts).pack(side='left')
# ttk.Combobox(subFrame, state='readonly', values=envName, textvariable = self.envOpts).pack(side='left')
imgloc = "./img/"
imty = '.jpg'
entxb = tkinter.Text(subFrame, height=5, width=137, wrap=tkinter.NONE, bg="gray80")
enscb = ttk.Scrollbar(subFrame, orient=tkinter.HORIZONTAL, command=entxb.xview)
entxb.configure(xscrollcommand=enscb.set)
enscb.pack(fill=tkinter.X)
entxb.pack()
self.slev = ttk.Label(subFrame, text='Selected Environment: None')
self.slev.pack(pady=(15, 75))
# style = Style()
# style.configure('TButton', activebackground="gray80",
# borderwidth='4', )
for e in envName:
try:
epic = Image.open(imgloc + e + imty)
epic = epic.resize((50, 50), Image.ANTIALIAS)
piepic = PhotoImage(epic)
eb = tkinter.Radiobutton(entxb, image=piepic, text=e, variable=self.envOpts, value=e,
command=self.selevUpdate, compound=tkinter.TOP, indicatoron=0,
height=70)
eb.piepic = piepic
self.envButtons.append(eb)
except IOError:
epic = Image.open(imgloc + "custom" + imty)
epic = epic.resize((50, 50), Image.ANTIALIAS)
piepic = PhotoImage(epic)
eb = tkinter.Radiobutton(entxb, image=piepic, text=e, variable=self.envOpts, value=e,
command=self.selevUpdate, compound=tkinter.TOP, indicatoron=0,
height=70)
eb.piepic = piepic
self.envButtons.append(eb)
# anchor=tkinter.S
entxb.window_create(tkinter.END, window=eb)
entxb.configure(state=tkinter.DISABLED)
agtxb = tkinter.Text(subFrame, height=2, width=137, wrap=tkinter.NONE, bg="gray80")
agscb = ttk.Scrollbar(subFrame, orient=tkinter.HORIZONTAL, command=agtxb.xview)
agtxb.configure(xscrollcommand=agscb.set)
agscb.pack(fill=tkinter.X)
agtxb.pack()
self.slag = ttk.Label(subFrame, text='Selected Agent: None')
self.slag.pack(pady=(15, 30))
for a in agtName:
ab = tkinter.Radiobutton(agtxb, text=a, variable=self.agentOpts, value=a, command=self.selagUpdate,
compound=tkinter.TOP, indicatoron=0, height=1)
agtxb.window_create(tkinter.END, window=ab)
self.agentButtons.append(ab)
agtxb.configure(state=tkinter.DISABLED)
subFrame.pack()
set_model = tkinter.Button(self, text='Set Model', command=master.selectModel)
set_model.pack()
space = tkinter.Canvas(self, bg="gray80", highlightbackground="gray80")
space.pack()
View.CreateToolTip(set_model, "Run program with the currently selected environment and agent")
def selevUpdate(self):
envUpdate = 'Selected Environment: ' + self.envOpts.get()
self.slev.config(text=envUpdate)
curAgents = View.allowedAgents.get(self.envOpts.get())
if curAgents is not None:
for agentButton in self.agentButtons:
if agentButton.cget('text') in curAgents or agentButton.cget('text') not in View.allowedEnvs:
agentButton.configure(state=tkinter.NORMAL)
else:
agentButton.configure(state=tkinter.DISABLED)
if agentButton.cget('text') == self.agentOpts.get():
self.agentOpts.set(None)
else:
for agentButton in self.agentButtons:
agentButton.configure(state=tkinter.NORMAL)
for envButton in self.envButtons:
envButton.configure(state=tkinter.NORMAL)
def selagUpdate(self):
    """Callback fired when an agent radio button is selected.

    Mirror image of selevUpdate: updates the 'Selected Agent' label, then
    enables only the environment buttons compatible with the chosen agent
    (per View.allowedEnvs), clearing the environment selection if it just
    became unavailable. Agents absent from View.allowedEnvs re-enable all
    environment and agent buttons.

    NOTE(review): nesting reconstructed from mangled indentation — confirm
    the selection-clearing check sits inside the disabled branch.
    """
    agUpdate = 'Selected Agent: ' + self.agentOpts.get()
    self.slag.config(text=agUpdate)
    # Environments permitted for this agent, or None if unrestricted/unknown.
    curEnvs = View.allowedEnvs.get(self.agentOpts.get())
    if curEnvs is not None:
        for envButton in self.envButtons:
            # An environment stays selectable if explicitly allowed, or if it
            # never appears in the compatibility table at all.
            if envButton.cget('text') in curEnvs or envButton.cget('text') not in View.allowedAgents:
                envButton.configure(state=tkinter.NORMAL)
            else:
                envButton.configure(state=tkinter.DISABLED)
                # Deselect the environment if it just became unavailable.
                if envButton.cget('text') == self.envOpts.get():
                    self.envOpts.set(None)
    else:
        # No restrictions recorded: everything is fair game again.
        for envButton in self.envButtons:
            envButton.configure(state=tkinter.NORMAL)
        for agentButton in self.agentButtons:
            agentButton.configure(state=tkinter.NORMAL)
class EnvironmentChooser(ttk.Frame):
    """Start-up screen presenting one button per selectable environment.

    Each choose* handler tells the listener which environment was picked,
    opens the main View.ProjectWindow, and tears this chooser down.

    NOTE(review): ``super().__init__(master, listener)`` forwards *listener*
    to ttk.Frame, and ``self.frame`` / ``self.listener`` / ``self.master``
    are referenced without a visible assignment — presumably a shared base
    class in this file sets them up; confirm.
    NOTE(review): ttk widgets do not support the classic-tk ``fg`` option —
    verify these ttk.Button calls actually construct without a TclError.
    """
    def __init__(self, master, listener):
        super().__init__(master, listener)
        # Title plus one button per environment, stacked in a grid column.
        self.title = ttk.Label(self.frame, text='Select an Environment:')
        self.title.grid(row=1, column=4, columnspan=2, sticky='wens')
        self.frozenLakeButton = ttk.Button(self.frame, text='Frozen Lake', fg='black',
                                           command=self.chooseFrozenLake)
        self.frozenLakeButton.grid(row=2, column=4, columnspan=2, sticky='wens')
        self.cartPoleButton = ttk.Button(self.frame, text='Cart Pole', fg='black',
                                         command=self.chooseCartPoleEnv)
        self.cartPoleButton.grid(row=3, column=4, columnspan=2, sticky='wens')
        self.cartPoleDiscreteButton = ttk.Button(self.frame, text='Cart Pole Discretized', fg='black',
                                                 command=self.chooseCartPoleDiscreteEnv)
        self.cartPoleDiscreteButton.grid(row=4, column=4, columnspan=2, sticky='wens')
        self.customButton = ttk.Button(self.frame, text='Custom Environment', fg='black',
                                       command=self.chooseCustom)
        self.customButton.grid(row=5, column=4, columnspan=2, sticky='wens')
        self.frame.grid(row=0, column=0)
        self.frame.lift()

    def chooseFrozenLake(self):
        # NOTE(review): calls the generic setEnvironment() rather than a
        # FrozenLake-specific setter — confirm this selects Frozen Lake.
        self.listener.setEnvironment()
        View.ProjectWindow(self.master, self.listener)
        self.frame.destroy()

    def chooseCartPoleEnv(self):
        # Select the continuous-observation Cart Pole environment.
        self.listener.setCartPoleEnv()
        View.ProjectWindow(self.master, self.listener)
        self.frame.destroy()

    def chooseCartPoleDiscreteEnv(self):
        # Select the discretized Cart Pole environment.
        self.listener.setCartPoleDiscreteEnv()
        View.ProjectWindow(self.master, self.listener)
        self.frame.destroy()

    def chooseCustom(self):
        # Custom environment loading is not implemented in this screen.
        pass
def center(win):
    """Center the tkinter window *win* on the screen.

    Forces pending geometry updates first so the reported width/height are
    current, then repositions the window via a geometry string.
    """
    win.update_idletasks()
    w = win.winfo_width()
    h = win.winfo_height()
    # Integer halving of screen and window separately (matches tk pixel math).
    pos_x = win.winfo_screenwidth() // 2 - w // 2
    pos_y = win.winfo_screenheight() // 2 - h // 2
    win.geometry('{}x{}+{}+{}'.format(w, h, pos_x, pos_y))
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,595 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/trpo.py |
from Agents import agent, modelFreeAgent
from Agents.ppo import PPO
from Agents.deepQ import DeepQ
from Agents.models import Actor, Critic
from Agents.Collections import ExperienceReplay
from Agents.Collections.TransitionFrame import TransitionFrame
import tensorflow as tf
#from tensorflow.linalg.experimental import conjugate_gradient
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Conv2D
from tensorflow.keras.layers import Flatten, TimeDistributed, LSTM, multiply
from tensorflow.keras import utils
from tensorflow.keras.losses import KLDivergence, MSE
from tensorflow.keras.optimizers import Adam
from tensorflow.compat.v1.train import GradientDescentOptimizer
#import tensorflow_probability as tfp
import numpy as np
import copy
import random
import joblib
import math
class TRPO(PPO):
    """Trust Region Policy Optimization agent layered on top of PPO.

    NOTE(review): the classic TRPO machinery (conjugate gradient, KL line
    search) exists only in the commented-out torch code below; the live
    implementation trains with a clipped PPO-style surrogate loss.
    """
    displayName = 'TRPO'
    # One extra tunable appended to PPO's parameter list.
    newParameters = [DeepQ.Parameter('Value learning rate+', 0.00001, 1, 0.00001, 0.001,
                                     True, True,
                                     "A learning rate that the Adam optimizer starts at")
                     ]
    parameters = PPO.parameters + newParameters

    #Invoke constructor
    def __init__(self, *args):
        """Strip this class's extra parameters off *args* and defer to PPO."""
        paramLen = len(TRPO.newParameters)
        super().__init__(*args[:-paramLen])
        self.gamma = 0.99  # discount factor
        '''self.min_epsilon = modelFreeAgent.ModelFreeAgent.min_epsilon
        self.max_epsilon = modelFreeAgent.ModelFreeAgent.max_epsilon
        self.decay_rate = modelFreeAgent.ModelFreeAgent.decay_rate
        self.time_steps = modelFreeAgent.ModelFreeAgent.time_steps'''
        self.parameters = TRPO.parameters
        # NOTE(review): PPO.newParameters here looks like a typo for
        # TRPO.newParameters — confirm intent before changing.
        self.newParameters = PPO.newParameters
        self.c1 = 0.001  # value-loss coefficient in the combined loss
        self.c2 = 0.001  # entropy coefficient in the combined loss
        self.loss = 0
        self.Lambda = 1  # GAE lambda
        '''Qparams = []
        empty_state = self.get_empty_state()
        for i in range(3):
            Qparams.append(DeepQ.newParameters[i].default)
        self.batch_size, self.memory_size, self.target_update_interval = [int(param) for param in Qparams]
        self.memory = ExperienceReplay.ReplayBuffer(self, self.memory_size, TransitionFrame(empty_state, -1, 0, empty_state, False))'''
        self.total_steps = 0
        # All-ones masks fed where the models expect an action-mask input.
        self.allMask = np.full((1, self.action_size), 1)
        self.allBatchMask = np.full((self.actorIts, self.action_size), 1)

    def sample(self):
        """Draw a random mini-batch of actorIts transitions from replay memory."""
        return self.memory.sample(self.actorIts)

    def addToMemory(self, state, action, reward, next_state, done):
        """Append one transition to the replay buffer."""
        self.memory.append_frame(TransitionFrame(state, action, reward, next_state, done))

    def predict(self, state, isTarget):
        # Unused by this agent; predictions go through the models directly.
        pass

    def remember(self, state, action, reward, next_state, done):
        """Store a transition and, once the buffer holds two batches, run one
        training pass; returns the mean combined loss over the mini-batch
        (0 until enough data is buffered)."""
        self.addToMemory(state, action, reward, next_state, done)
        loss = 0
        if len(self.memory) < 2*self.batch_size:
            return loss
        losses = []
        _, mini_batch = self.sample()
        states, actions, rewards, next_states, dones = self.sample_trajectories(mini_batch)
        X_train, Y_train = self.calculateTargetValues(mini_batch)
        # Fit the value network toward the bootstrapped targets.
        self.value_model.train_on_batch(X_train, Y_train)
        # Create optimizer for minimizing loss
        # NOTE(review): only referenced by the commented-out optimize_loss
        # call at the bottom, so this optimizer is currently unused.
        optimizer = GradientDescentOptimizer(learning_rate= 0.001)
        for idx, transition in enumerate(mini_batch):
            state = transition.state
            next_state = transition.next_state
            shape = (1,) + self.state_size
            state = np.reshape(state, shape)
            next_state = np.reshape(next_state, shape)
            # Compute old probability
            # NOTE(review): probabilities are reduced over the whole batch
            # (`states`) inside this per-transition loop — confirm intended.
            old_probs = self.get_probabilities(states)
            old_probs = np.array(old_probs)
            actions = np.array(actions)
            old_p = tf.math.log(tf.reduce_sum(np.multiply(old_probs, actions)))
            old_p = tf.stop_gradient(old_p)
            goal = self.goal_idx(idx)
            # Compute advantage
            advantage = self.get_advantages(idx, goal)
            # Compute new probabilities
            new_probs = self.policy_model([states, self.allBatchMask], training=True)
            new_probs = np.array(new_probs)
            new_p = tf.math.log(tf.reduce_sum(np.multiply(new_probs, actions)))
            # Compute probability ratio
            prob_ratio = tf.math.exp(new_p - old_p)
            # Run the policy under N timesteps using loss function
            value_loss = self.c1 * self.mse_loss(state, next_state)
            clip_loss = self.clipped_loss(prob_ratio, advantage)
            self.train_policy(states, clip_loss)
            entropy = self.get_entropy(state)
            loss = self.agent_loss(value_loss, clip_loss, entropy)
            losses.append(loss)
        loss = np.mean(np.array(losses))
        print("loss iteration: " + str(loss))
        self.updateTarget()
        # apply gradient optimizer to optimize loss and policy network
        '''with tf.GradientTape() as tape:
            loss = self.optimize_loss(loss, optimizer, tape)'''
        return loss

    def sample_trajectories(self, mini_batch):
        """Unpack a mini-batch of TransitionFrames into parallel arrays/lists."""
        states = np.zeros(((self.actorIts, ) + self.state_size))
        next_states = np.zeros(((self.actorIts, ) + self.state_size))
        actions = np.zeros(((self.actorIts, ) + (self.action_size, )))
        rewards = []
        dones = []
        for index, transition in enumerate(mini_batch):
            state, action, reward, next_state, done = transition
            states[index, :] = state
            # NOTE(review): `action` is scalar and broadcasts across the
            # action dimension here rather than being one-hot encoded — confirm.
            actions[index, :] = action
            rewards.append(reward)
            next_states[index, :] = next_state
            dones.append(done)
        return states, actions, rewards, next_states, dones

    def goal_idx(self, idx):
        """Walk forward from idx to the index of the episode-ending transition
        (or the last buffered transition if none is marked done)."""
        transitions = self.memory._transitions
        while idx < len(self.memory._transitions)-1 and transitions[idx] is not None and transitions[idx].is_done is False:
            idx+=1
        return idx

    '''def update_policy(self):
        if self.total_steps >= 2*self.batch_size and self.total_steps % self.target_update_interval == 0:
            self.policy_model.set_weights(self.newParameters)
            print("target updated")
        self.total_steps += 1'''

    def updateTarget(self):
        # Only counts steps; actual target syncing is commented out above.
        self.total_steps+=1

    def calculateTargetValues(self, mini_batch):
        """Build (X, Y) training pairs for the value network.

        Y holds the raw reward for terminal transitions, otherwise the reward
        plus a discounted bootstrap from the mean predicted next-state value.
        """
        X_train = [np.zeros((self.batch_size,) + self.state_size), np.zeros((self.batch_size,) + (self.action_size,))]
        next_states = np.zeros((self.actorIts,) + self.state_size)
        for index_rep, transition in enumerate(mini_batch):
            # NOTE(review): this tuple unpack is unused; the fields are read
            # from `transition` attributes below.
            states, actions, rewards, _, dones = transition
            X_train[0][index_rep] = transition.state
            X_train[1][index_rep] = self.create_one_hot(self.action_size, transition.action)
            next_states[index_rep] = transition.next_state
        Y_train = np.zeros((self.batch_size,) + (self.action_size,))
        vnext = self.value_model.predict([next_states, self.allBatchMask])
        vnext = np.mean(vnext, axis=1)
        for index_rep, transition in enumerate(mini_batch):
            if transition.is_done:
                Y_train[index_rep][transition.action] = transition.reward
            else:
                # NOTE(review): the discount exponent uses batch position, not
                # distance-to-terminal — confirm this is the intended schedule.
                Y_train[index_rep][transition.action] = transition.reward + vnext[index_rep] * (self.gamma ** (self.batch_size-index_rep))
        return X_train, Y_train

    def get_advantages(self, idx, goal):
        """Accumulate GAE-style advantages over transitions [idx, goal) and
        return them min-max normalized to [0, 1] (0 when all are equal).

        NOTE(review): if goal == idx the advantages list is empty and
        np.mean/min/max would warn and yield nan — confirm callers avoid this.
        """
        #print("Goal: " + str(goal))
        transitions = self.memory.get_next_transitions(idx, goal)
        states = [transitions[i].state for i in range(goal-idx)]
        next_states = [transitions[i].next_state for i in range(goal-idx)]
        rewards = [transitions[i].reward for i in range(goal-idx)]
        advantages = []
        advantage = 0
        total_gamma = 0
        for j in range(goal-idx):
            total_gamma = (self.gamma * self.Lambda) ** j
            # discouNnted_rewards += total_gamma * rewards[j]
            shape = (1,) + self.state_size
            state = np.reshape(states[j], shape)
            next_state = np.reshape(next_states[j], shape)
            v = self.value_model.predict([state, self.allMask])
            v = np.mean(v)
            v_next = self.value_model.predict([next_state, self.allMask])
            v_next = np.mean(v_next)
            # TD-style residual, discounted and accumulated.
            advantage += (total_gamma * v_next) - v + rewards[j]
            advantages.append(advantage)
        mean = np.mean(np.array(advantages), axis=0)
        low = np.min(np.array(advantages))
        high = np.max(np.array(advantages))
        if high - low == 0:
            return 0
        return (mean - low) / (high - low)

    def mse_loss(self, state, next_state):
        """Mean-squared error between the value predictions for a state and
        its successor (used as the value-loss term)."""
        v_pred = self.value_model.predict([state, self.allMask])
        v_true = self.value_model.predict([next_state, self.allMask])
        #return self.value_model.evaluate(x=[states, self.allBatchMask], y=[next_states, self.allBatchMask], batch_size=self.batch_size)
        return tf.reduce_sum(MSE(v_true, v_pred)).numpy()

    def clipped_loss(self, prob_ratio, advantage):
        """PPO-style clipped surrogate: min(ratio*adv, 1-eps, 1+eps) * adv,
        with epsilon decayed over time_steps."""
        epsilon = self.min_epsilon + (self.max_epsilon - self.min_epsilon) * np.exp(-self.decay_rate * self.time_steps)
        #epsilon = 0.2
        clips = []
        adv_ratio = (prob_ratio*advantage).numpy()
        adv_ratio = np.mean(adv_ratio)
        clips.append(adv_ratio)
        clips.append(1-epsilon)
        clips.append(1+epsilon)
        minimum = math.inf
        for clip in clips:
            minimum = min(minimum, clip)
        loss = minimum * advantage
        return loss

    def get_entropy(self, state):
        """Shannon entropy of the policy output for *state*.

        NOTE(review): np.unique counts distinct probability values, and the
        divisor is len(probabilities) (number of rows, i.e. 1), not the count
        total — confirm this computes the quantity intended.
        """
        from scipy.stats import entropy
        shape = (1,) + self.state_size
        state = np.reshape(state, shape)
        probabilities = self.policy_model.predict([state, self.allMask])
        #return tf.math.log(tf.reduce_sum((np.array(entropy(probabilities)))))
        value, counts = np.unique(probabilities, return_counts=True)
        probs = counts / len(probabilities)
        # Local name shadows the scipy.stats.entropy imported above.
        entropy = 0
        for i in probs:
            entropy -= i * math.log(i, math.e)
        return entropy

    def agent_loss(self, value_loss, clip_loss, entropy):
        """Combined objective: clipped surrogate + value loss + entropy bonus."""
        return clip_loss + value_loss + (self.c2 * entropy)

    '''def kl_divergence(self, states, new_states):
        kl = tf.keras.losses.KLDivergence()
        d_pred = self.policy_model.predict(states)
        d_true = self.policy_model.predict(new_states)
        return kl(new_states, states).numpy()'''

    def optimize_loss(self, loss, optimizer, tape):
        """Apply one gradient step of *loss* to the policy model (currently
        only invoked from commented-out code in remember)."""
        loss = np.array(loss)
        loss = tf.convert_to_tensor(loss)
        grads = tape.gradient(loss, self.policy_model.trainable_variables)
        optimizer.apply_gradients(zip(grads, self.policy_model.trainable_variables))
        return loss

    def train_policy(self, states, clip_loss):
        """Run the policy forward under a GradientTape and compute gradients.

        NOTE(review): the computed gradients are discarded (never applied),
        and clip_loss is converted from a constant so no gradient can flow —
        confirm whether an apply_gradients step is missing here.
        """
        with tf.GradientTape() as tape:
            tape.watch(self.policy_model.trainable_variables)
            self.policy_model([states, self.allBatchMask], training=True)
            #loss = np.array(self.agent_loss(value_loss, clip_loss, entropy))
            #loss = tf.convert_to_tensor(loss)
            #loss = self.agent_loss(value_loss, clip_loss, entropy)
            loss = tf.convert_to_tensor(clip_loss)
        tape.gradient(loss, self.policy_model.trainable_variables)

    '''def policy_gradient(self, loss):
        self.policy_network.zero_grad()
        loss.backward(retain_graph=True)
        policy_gradient = parameters_to_vector([v.grad for v in self.policy_network.parameters()]).squeeze(0)
        return policy_gradient'''

    '''def updateAgent(self, rollouts):
        states = torch.cat([r.states for r in rollouts], dim=0)
        actions = torch.cat([r.actions for r in rollouts], dim=0)
        rewards = torch.cat([r.rewards for r in rollouts], dim=0).flatten()
        new_states = torch.cat([r.new_states for r in rollouts], dim=0)
        probabilities = torch.cat([r.action_dist for r in rollouts], dim=0)
        # Compute advantage function used for computing loss function.
        baseline = self.value_network.predict(states).data
        rewards_tensor = Tensor(rewards).unsqueeze(1)
        advantage = rewards_tensor - baseline
        # Normalize the advantage
        self.advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-8)
        loss = self.initial_loss(actions, probabilities)
        policy_gradient = self.policy_gradient(loss)
        if (policy_gradient.nonzero().size()[0]):
            step_dir = self.conjugate_gradient(-policy_gradient)
            dir_variable = Variable(torch.from_numpy(step_dir))
            shs = .5 * step_dir.dot(self.hessian_vector(dir_variable).cpu().numpy().t)
            lm = np.sqrt(shs / self.max_kl)
            fullstep = step_dir / lm
            grad_step = -policy_gradient.dot(dir_variable).data[0]
            theta = self.linesearch(parameters_to_vector(self.policy_network.parameters()), states, actions, fullstep, grad_step / lm)
            # Fit estimated value function to actual rewards
            #ev_before = math_utils.explained_variance_1d(baseline.squeeze(1).cpu().numpy(), rewards)
            self.value_network.zero_grad()
            value_params = parameters_to_vector(self.value_network.parameters())
            self.value_network.fit(states, Variable(Tensor(rewards)))
            #ev_after = math_utils.explained_variance_1d(self.value_network.predict(self.states).data.squeeze(1).cpu().numpy(), rewards)
            #if ev_after < ev_before or np.abs(ev_after) < 1e-4:
            vector_to_parameters(value_params, self.value_network.parameters())

    def initial_loss(self, actions, probabilities):
        prob_new = torch.cat(probabilities).gather(1, torch.cat(actions))
        prob_old = prob_new.detach() + 1e-8
        prob_ratio = prob_new / prob_old
        loss = -torch.mean(prob_ratio * Variable(self.advantage)) - (self.ent_coeff * self.entropy)
        return loss

    def kl_divergence(self, model, states):
        states_tensor = torch.cat([Variable(Tensor(state)).unsqueeze(0) for state in states])
        action_prob = model(states_tensor).detach() + 1e-8
        old_action_prob = self.policy_network(states_tensor)
        return torch.sum(old_action_prob * torch.log(old_action_prob/action_prob), 1).mean()

    # Computes hessian vector product
    def hessian_vector(self, vector):
        self.policy_network.zero_grad()
        kl_div = self.kl_divergence(self.policy_network)
        kl_grad = torch.autograd.grad(kl_div, self.policy_network.parameters(), create_graph=True)
        kl_vector = torch.cat([grad.view(-1) for grad in kl_grad])
        v_product = torch.sum(kl_vector * vector)
        grad_product = torch.autograd.grad(v_product, self.policy_network.parameters())
        actual_product = torch.cat([grad.contiguous().view(-1) for grad in grad_product]).data
        return actual_product + (self.damping * vector.data)

    # Uses conjugate gradient to ensure policy upates aren't too big or too small.
    def conjugate_gradient(self, b, max_iterations=10):
        r = b.clone().data
        p = b.clone().data
        x = np.zeros_like(b.data.cpu().numpy())
        r_dotr = r.double().dot(r.double())
        for i in range(max_iterations):
            z = self.hessian_vector(Variable(p)).squeeze(0)
            v = r_dotr / p.double().dot(z.double())
            x += v * p.cpu().numpy()
            r -= v * z
            newr_dotr = r.double().dot(r.double())
            mu = newr_dotr / r_dotr
            p = r + mu * p
            r_dotr = newr_dotr
            if r_dotr < self.residual_total:
                break
        return x

    def surrogate_loss(self, theta, states, actions):
        new_policy_network = copy.deepcopy(self.policy_network)
        vector_to_parameters(theta, new_policy_network.parameters())
        states_tensor = torch.cat([Variable(Tensor(state)).unsqueeze(0) for state in states])
        new_prob = new_policy_network(states_tensor).gather(1, torch.cat(actions)).data
        old_prob = self.policy_network(states_tensor).gather(1, torch.cat(self.actions)).data + 1e-8
        return -torch.mean((new_prob / old_prob) * self.advantage)

    def linesearch(self, states, actions, x, fullstep, exp_improverate):
        accept_ratio = .1
        max_backtracks = 10
        fval = self.surrogate_loss(x, states, actions)
        for (_n_backtracks, stepfrac) in enumerate(.5**np.arange(max_backtracks)):
            xnew = x.data.cpu().numpy() + stepfrac * fullstep
            newfval = self.surrogate_loss(Variable(torch.from_numpy(xnew)), states, actions)
            actual_improvement = fval - newfval
            expected_improvement = exp_improverate * stepfrac
            ratio = actual_improvement / expected_improvement
            if ratio > accept_ratio and actual_improvement > 0:
                self.loss = newfval
                return Variable(torch.from_numpy(xnew))
        self.loss = fval
        return x

    # Performs a policy update by updating parameters.
    def apply_update(self, grad_flattened):
        n = 0
        for p in TRPO.parameters:
            numel = p.numel()
            g = grad_flattened[n:n + numel].view(p.shape)
            p.data += g
            n += numel'''

    def update(self):
        # No per-frame update work; training happens inside remember().
        pass

    def save(self, filename):
        """Persist model weights with joblib.

        NOTE(review): the second dump targets the same filename and overwrites
        the first, so only the policy-model weights survive — confirm.
        """
        mem1 = self.value_model.get_weights()
        joblib.dump((TRPO.displayName, mem1), filename)
        mem2 = self.policy_model.get_weights()
        joblib.dump((TRPO.displayName, mem2), filename)

    def load(self, filename):
        """Load weights saved by save(); verifies the agent name tag first.

        NOTE(review): the single weight blob is applied to BOTH the policy and
        value models — confirm both share an architecture.
        """
        name, mem = joblib.load(filename)
        if name != TRPO.displayName:
            print('load failed')
        else:
            self.policy_model.set_weights(mem)
            self.value_model.set_weights(mem)

    def memsave(self):
        """Return the policy model's in-memory weights."""
        return self.policy_model.get_weights()

    def memload(self, mem):
        """Apply in-memory weights to both policy and value models."""
        self.policy_model.set_weights(mem)
        self.value_model.set_weights(mem)

    def reset(self):
        # No transient state to clear between runs.
        pass

    def __deepcopy__(self, memodict={}):
        # Deep copies of this agent are not supported (TF models held).
        pass
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,596 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/doubleDuelingQNative.py | from Agents import modelFreeAgent
import numpy as np
from collections import deque
import random
import joblib
import cffi
import os
import pathlib
import platform
import importlib
class DoubleDuelingQNative(modelFreeAgent.ModelFreeAgent):
    """Deep-Q agent backed by a native (compiled) implementation via cffi.

    The Python side only marshals arguments; learning happens in the shared
    library under Agents/Native/deepQNative, built on demand.
    NOTE(review): despite the class name, the backend directory is the plain
    deepQNative library — confirm it implements double/dueling Q.
    """
    displayName = 'Double, Dueling Deep Q Native'
    # Hyperparameters appended to the base agent's parameter list.
    newParameters = [modelFreeAgent.ModelFreeAgent.Parameter('Batch Size', 1, 256, 1, 32, True, True, "The number of transitions to consider simultaneously when updating the agent"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Memory Size', 1, 655360, 1, 1000, True, True, "The maximum number of timestep transitions to keep stored"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Target Update Interval', 1, 100000, 1, 200, True, True, "The distance in timesteps between target model updates"),
                     modelFreeAgent.ModelFreeAgent.Parameter('Learning Rate', 0.00001, 100, 0.00001, 0.001, True, True, "The rate at which the parameters respond to environment observations")]
    parameters = modelFreeAgent.ModelFreeAgent.parameters + newParameters

    def __init__(self, *args):
        """Split native hyperparameters off *args*, build/load the cffi
        extension if needed, and create the native agent instance."""
        paramLen = len(DoubleDuelingQNative.newParameters)
        super().__init__(*args[:-paramLen])
        # First three extras are integral; the learning rate stays a float.
        self.batch_size, self.memory_size, self.target_update_interval, _ = [int(arg) for arg in args[-paramLen:]]
        _, _, _, self.learning_rate = [arg for arg in args[-paramLen:]]
        # The cffi build and import expect to run from the native source dir;
        # the original working directory is restored afterwards.
        oldwd = pathlib.Path().absolute()
        curDir = oldwd / "Agents/Native/deepQNative"
        os.chdir(curDir.as_posix())
        self.ffi = cffi.FFI()
        if platform.system() == "Windows":
            # Windows builds land in a Release/ subdirectory.
            if not importlib.util.find_spec("Agents.Native.deepQNative.Release._deepQNative"):
                self.compileLib(curDir)
            import Agents.Native.deepQNative.Release._deepQNative as _deepQNative
        else:
            if not importlib.util.find_spec("Agents.Native.deepQNative._deepQNative"):
                self.compileLib(curDir)
            import Agents.Native.deepQNative._deepQNative as _deepQNative
        self.nativeInterface = _deepQNative.lib
        # Opaque handle to the native agent; freed in __del__.
        self.nativeDQN = self.nativeInterface.createAgentc(self.state_size[0], self.action_size, self.gamma, self.batch_size, self.memory_size, self.target_update_interval, self.learning_rate)
        os.chdir(oldwd.as_posix())

    def compileLib(self, curDir):
        """Compile the cffi wrapper around deepQNative.h into an importable
        extension module (into Release/ on Windows, in place elsewhere)."""
        headerName = curDir / "deepQNative.h"
        outputDir = (curDir / "Release") if platform.system() == "Windows" else curDir
        with open(headerName) as headerFile:
            self.ffi.cdef(headerFile.read())
        self.ffi.set_source(
            "_deepQNative",
            """
            #include "deepQNative.h"
            """,
            libraries=["deepQNative"],
            library_dirs=[outputDir.as_posix()],
            include_dirs=[curDir.as_posix()]
        )
        self.ffi.compile(verbose=True, tmpdir=outputDir)

    def __del__(self):
        # Release the native agent's resources.
        self.nativeInterface.freeAgentc(self.nativeDQN)

    def choose_action(self, state):
        """Ask the native agent for an action given *state* (iterable of floats)."""
        cState = self.ffi.new("float[]", list(state))
        action = self.nativeInterface.chooseActionc(self.nativeDQN, cState)
        return action

    def remember(self, state, action, reward, new_state, done=False):
        """Feed one transition to the native agent; returns its reported loss.

        NOTE(review): *new_state* is never passed across — presumably the
        native side tracks successor states itself; confirm.
        """
        cState = self.ffi.new("float[]", list(state))
        #cNewState = self.ffi.new("float[]", new_state)
        done = 1 if done else 0  # C interface expects an int flag
        loss = self.nativeInterface.rememberc(self.nativeDQN, cState, action, reward, done)
        return loss

    def update(self):
        # Updates happen natively inside rememberc; nothing to do here.
        pass

    def reset(self):
        # No Python-side state to clear.
        pass

    def __deepcopy__(self, memodict={}):
        # The native handle cannot be deep-copied.
        pass

    def save(self, filename):
        """Tell the native agent to serialize itself to *filename*."""
        cFilename = self.ffi.new("char[]", filename.encode('ascii'))
        self.nativeInterface.savec(self.nativeDQN, cFilename)

    def load(self, filename):
        """Tell the native agent to restore itself from *filename*."""
        cFilename = self.ffi.new("char[]", filename.encode('ascii'))
        self.nativeInterface.loadc(self.nativeDQN, cFilename)

    def memsave(self):
        """Return an opaque in-memory snapshot from the native agent."""
        return self.nativeInterface.memsavec(self.nativeDQN)

    def memload(self, mem):
        """Restore the native agent from an in-memory snapshot."""
        self.nativeInterface.memloadc(self.nativeDQN, mem)
75,597 | RobertCordingly/easyRL-v0 | refs/heads/master | /Environments/environment.py | import gym
from abc import ABC
"""This is an abstract environment class that allows a user to define
their own custom environment by extending this class as a 'CustomEnv' class.
"""
class Environment(ABC):
    """Abstract base class for environments.

    Users plug in their own environment by subclassing this (conventionally
    as a 'CustomEnv' class) and overriding the lifecycle methods below.
    """

    displayName = 'Environment'

    def __init__(self):
        """Create an uninitialized environment; subclasses populate the fields."""
        self.action_size = None  # number of available actions
        self.state_size = None   # shape/size of an observation
        self.state = None        # current observation
        self.done = None         # whether the current episode has ended

    def step(self, action):
        """Advance the environment one time step using *action*.

        :param action: the action the agent takes before the step
        :type action: int
        :return: the reward obtained for taking the action
        :rtype: number
        """

    def reset(self):
        """Reset the environment to an initial state.

        :return: the state of the reset environment
        :rtype: tuple
        """

    def sample_action(self):
        """Sample a valid action from the environment.

        :return: some action the agent can take
        :rtype: int
        """

    def render(self):
        """Render the current environment state.

        :return: an image of the current state
        :rtype: PIL.Image
        """

    def close(self):
        """Close the environment and free any resources it holds.

        :return: None
        :rtype: None
        """
75,598 | RobertCordingly/easyRL-v0 | refs/heads/master | /MVC/cloudBridge.py | import boto3
import uuid
import json
import time
import os
import math
import requests
class CloudBridge:
    """Publishes training progress to AWS S3 for the easyRL web front end.

    Accumulates per-step metrics, renders episode GIFs, uploads them and a
    rolling ``data.json`` payload to a per-job S3 bucket, and optionally
    snapshots the model. All AWS access goes through a boto3 session built
    from the supplied credentials.
    """

    def __init__(self, jobID, secretKey, accessKey, sessionToken, model):
        """Set up accumulators and the boto3 session.

        :param jobID: unique job identifier (a uuid4 is generated when None,
            e.g. for non-web runs; the web GUI passes one in)
        :param secretKey: AWS secret access key
        :param accessKey: AWS access key id
        :param sessionToken: AWS session token
        :param model: the agent/model object; must expose ``save(filename)``
        """
        self.animationFrames = []
        self.jobID = jobID
        self.secretKey = secretKey
        self.accessKey = accessKey
        self.s3Client = None           # created lazily in init()
        self.episodeData = []          # rolling window of per-episode metrics
        self.gifURLs = []              # rolling window of public GIF URLs
        self.delayTime = 1000          # min ms between uploads
        self.uploadModels = True
        self.model = model
        self.startTime = int(round(time.time() * 1000))
        self.lastSave = 0
        self.botoSession = boto3.Session(
            aws_access_key_id=accessKey,
            aws_secret_access_key=secretKey,
            aws_session_token=sessionToken,
            region_name='us-east-1'
        )
        # Episode Variables
        self.trainingEpisodes = 0
        # If JobID is null generate one (WebGUI will pass in JobID)
        if self.jobID is None:
            self.jobID = uuid.uuid4()
        self.refresh()
        self.init()

    def init(self):
        """Create the S3 client (if not yet created) and the job's bucket."""
        if self.s3Client is None:
            self.s3Client = self.botoSession.client('s3')
            bucketName = 'easyrl-' + str(self.jobID)
            print(bucketName)
            self.s3Client.create_bucket(Bucket=bucketName)
            print("Created bucket for job.")

    def terminate(self):
        """Kill infrastructure (not implemented)."""
        pass

    def upload(self, filename):
        """Upload *filename* to the job bucket; no-op when no client exists.

        Fix: the original guard was inverted (``if self.s3Client is None``)
        and would have called ``upload_file`` on ``None``.
        """
        if self.s3Client is not None:
            self.s3Client.upload_file(filename, 'easyrl-' + str(self.jobID), filename)

    def setState(self, state):
        """Set the bridge's run state (e.g. "Training", "Finished")."""
        self.state = state

    def refresh(self):
        """Reset all per-run accumulators and return to the Idle state."""
        self.state = "Idle"
        # Step Variables
        self.episodeAccEpsilon = 0
        self.episodeAccReward = 0
        self.episodeAccLoss = 0
        self.curEpisodeSteps = 0
        self.trueTotalReward = 0
        self.animationFrames.clear()

    def submitStep(self, frame, epsilon, reward, loss):
        """Record one environment step: stash the render frame and accumulate
        epsilon/reward/loss (None values count as 0)."""
        self.animationFrames.append(frame)
        if epsilon is None:
            epsilon = 0
        if reward is None:
            reward = 0
        if loss is None:
            loss = 0
        # Accumulate Step
        self.episodeAccEpsilon += epsilon
        self.episodeAccReward += reward
        self.episodeAccLoss += loss
        self.curEpisodeSteps += 1

    def submitEpisode(self, episode, totalEpisodes):
        """Close out one episode: fold step accumulators into per-episode
        metrics, and (rate-limited by delayTime, or near the end of the run)
        upload the model snapshot, an episode GIF, and data.json to S3."""
        self.trainingEpisodes += 1
        # Per-episode averages from the step accumulators.
        avgLoss = self.episodeAccLoss / self.curEpisodeSteps
        totalReward = self.episodeAccReward
        avgEpsilon = self.episodeAccEpsilon / self.curEpisodeSteps
        self.trueTotalReward += totalReward
        # e = episode, l = average loss, r = total reward, p = avg epsilon
        self.episodeData.append({
            "e": episode,
            "l": round(avgLoss, 3),
            "p": round(avgEpsilon, 3),
            "r": round(totalReward, 3)
        })
        # Reset step accumulators for the next episode.
        self.curEpisodeSteps = 0
        self.episodeAccLoss = 0
        self.episodeAccReward = 0
        self.episodeAccEpsilon = 0
        # Keep only the most recent 1000 episodes of metrics.
        if (len(self.episodeData)) > 1000:
            self.episodeData.pop(0)
        currentTime = int(round(time.time() * 1000))
        # Throttle uploads, but always publish the last few episodes.
        if ((currentTime - self.lastSave) > self.delayTime) or (int(episode) > int(totalEpisodes) - 5):
            self.lastSave = currentTime
            if (self.state == "Training" and self.uploadModels):
                self.model.save("model.bin")
            # Render the buffered frames into a publicly readable GIF.
            if (len(self.animationFrames) > 0):
                filename = self.state + '-episode-' + str(episode) + ".gif"
                self.animationFrames[0].save("./" + filename, save_all=True, append_images=self.animationFrames)
                self.s3Client.upload_file(filename, 'easyrl-' + str(self.jobID), filename, ExtraArgs={'ACL': 'public-read'})
                os.remove("./" + filename)
                self.gifURLs.append("https://easyrl-" + str(self.jobID) + ".s3.amazonaws.com/" + filename)
                # Keep only the most recent 10 GIF links.
                if (len(self.gifURLs)) > 10:
                    self.gifURLs.pop(0)
            payload = {
                "episodesCompleted": int(episode),
                "totalReward": round(self.trueTotalReward),
                "avgReward": round(self.trueTotalReward / self.trainingEpisodes),
                "uptime": int(round(time.time() * 1000)) - self.startTime,
                "episodes": self.episodeData,
                "gifs": self.gifURLs
            }
            with open('data.json', 'w+') as f:
                json.dump(payload, f)
            self.animationFrames = []

    def submitTrainFinish(self):
        """Mark training finished (after a grace period for final uploads).

        NOTE(review): totalReward/avgReward are computed but unused here —
        kept as-is since dividing by trainingEpisodes raises on zero episodes.
        """
        totalReward = self.episodeAccReward
        avgReward = self.episodeAccReward / self.trainingEpisodes
        time.sleep(15)
        self.state = "Finished"

    def submitTestFinish(self):
        """Mark testing finished (after a grace period for final uploads)."""
        totalReward = self.episodeAccReward
        avgReward = self.episodeAccReward / self.trainingEpisodes
        time.sleep(15)
        self.state = "Finished"
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,599 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/Collections/TransitionFrame.py | from collections import namedtuple
# Immutable record of one environment transition.
_TRANSITION_FIELDS = ('state', 'action', 'reward', 'next_state', 'is_done')
TransitionFrame = namedtuple('TransitionFrame', _TRANSITION_FIELDS)
# Same record extended with the action taken on the previous step
# (used by recurrent agents that condition on the prior action).
ActionTransitionFrame = namedtuple('ActionTransitionFrame', ('prev_action',) + _TRANSITION_FIELDS)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,600 | RobertCordingly/easyRL-v0 | refs/heads/master | /Environments/cartPoleEnv.py | from Environments import classicControlEnv
import gym
from PIL import Image, ImageDraw
import math
class CartPoleEnv(classicControlEnv.ClassicControlEnv):
    """CartPole-v1 wrapper that renders frames with PIL instead of gym's viewer."""
    displayName = 'Cart Pole'

    def __init__(self):
        self.env = gym.make('CartPole-v1')
        # Discrete action count and observation shape come straight from gym.
        self.action_size = self.env.action_space.n
        self.state_size = self.env.observation_space.shape

    def render(self):
        """Draw the current cart/pole pose to a PIL RGB image.

        Returns None when the environment has no state yet (i.e. before the
        first reset). Geometry mirrors gym's classic-control renderer: the
        scene is drawn in a y-up world frame and flipped at the end.
        """
        if self.env.state is None: return None
        screen_width = 600
        screen_height = 400
        state = self.env.state
        world_width = self.env.x_threshold * 2
        scale = screen_width / world_width
        cartx = state[0] * scale + screen_width / 2.0  # cart centre in pixels
        carty = 100  # TOP OF CART
        polewidth = 10.0
        polelen = scale * (2 * self.env.length)
        cartwidth = 50.0
        cartheight = 30.0
        image = Image.new('RGB', (screen_width, screen_height), 'white')
        draw = ImageDraw.Draw(image)
        # Cart body.
        l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
        axleoffset = cartheight / 4.0
        cartPoints = [(cartx + l, carty + b), (cartx + l, carty + t), (cartx + r, carty + t), (cartx + r, carty + b)]
        draw.polygon(cartPoints, fill='black')
        # Pole rectangle, rotated about the axle by the pole angle state[2].
        l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
        t, b = t + axleoffset, b + axleoffset
        l, r, t, b = cartx + l, cartx + r, carty + t, carty + b
        polePoints = [(l, b), (l, t), (r, t), (r, b)]
        for i, (x, y) in enumerate(polePoints):
            x -= cartx
            y -= carty + axleoffset
            x, y = x * math.cos(state[2]) + y * math.sin(state[2]), -x * math.sin(state[2]) + y * math.cos(state[2])
            x += cartx
            y += carty + axleoffset
            polePoints[i] = x, y
        draw.polygon(polePoints, fill=(204, 153, 102))
        # Axle disc. Fixed: the blue channel was 284, which is outside the
        # valid 0-255 range for an RGB image; gym's renderer colours the axle
        # (.5, .5, .8), which maps to (127, 127, 204).
        draw.chord([cartx - polewidth / 2, carty + axleoffset - polewidth / 2, cartx + polewidth / 2, carty + axleoffset + polewidth / 2], 0, 360, fill=(127, 127, 204))
        # Track line.
        draw.line([(0, carty), (screen_width, carty)], fill='black')
        # Drawing used y-up coordinates; flip vertically into image space.
        return image.transpose(method=Image.FLIP_TOP_BOTTOM)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,601 | RobertCordingly/easyRL-v0 | refs/heads/master | /MVC/model.py | import random
import numpy as np
from Agents import cem, drqn, modelBasedAgent, modelFreeAgent
from Agents.Collections.TransitionFrame import TransitionFrame
import cProfile
from MVC import cloudBridge
import os.path
class Model:
    """Glue between the controller/view and the RL agent + environment.

    Lazily instantiates the user-selected environment and agent classes,
    runs the training/testing loops, and streams per-step progress back to
    the caller through ``messageQueue`` (and, when configured, mirrors it
    to the cloud via a CloudBridge).
    """

    def __init__(self):
        # these can be set directly from the Controller based on user input from the View
        self.environment_class = None  # class (not instance) of the selected environment
        self.agent_class = None        # class (not instance) of the selected agent
        self.isHalted = False          # set via halt_learning() to request an early stop
        self.isRunning = False         # True while run_learning/run_testing is active
        self.environment = None        # lazily built from environment_class
        self.agent = None              # lazily built from agent_class
        self.loadFilename = None       # pending agent save-file, consumed on the next run
        self.cloudBridge = None        # optional mirror of progress to the cloud

    def createBridge(self, jobID, secretKey, accessKey, sessionToken):
        # Create the cloud bridge at most once; later calls keep the existing
        # bridge (and its accumulated state) intact.
        print("Bridge Created")
        if (self.cloudBridge is None):
            self.cloudBridge = cloudBridge.CloudBridge(jobID, secretKey, accessKey, sessionToken, self)

    # def run_learning(self, messageQueue, total_episodes, max_steps, *model_args):
    #     cProfile.runctx('self.run_learning2(messageQueue, total_episodes, max_steps, *model_args)', globals(), locals(),
    #                     'stats')

    # def run_learning2(self, messageQueue, total_episodes, max_steps, *model_args):
    def run_learning(self, messageQueue, total_episodes, max_steps, *model_args):
        """Train the current agent for ``total_episodes`` episodes of up to
        ``max_steps`` steps each.

        Per step a Message(STATE, State) is put on ``messageQueue``; per
        episode a Message(EVENT, EPISODE); at the end a
        Message(EVENT, TRAIN_FINISHED). ``model_args`` are forwarded to the
        agent constructor.
        """
        self.isRunning = True
        if (self.cloudBridge is not None):
            self.cloudBridge.refresh()
            self.cloudBridge.setState("Training")
        if not self.environment:
            self.environment = self.environment_class()
        if self.loadFilename:
            # A pending load takes priority: rebuild the agent, then restore it.
            self.agent = self.agent_class(self.environment.state_size, self.environment.action_size, *model_args)
            self.agent.load(self.loadFilename)
            self.loadFilename = None
        elif not self.agent:
            self.agent = self.agent_class(self.environment.state_size, self.environment.action_size, *model_args)
        else:  # if agent already exists, update the model arguments
            # Carry the replay memory over into the freshly-parameterised agent.
            mem = self.agent.memsave()
            self.agent = self.agent_class(self.environment.state_size, self.environment.action_size, *model_args)
            self.agent.memload(mem)
        if (isinstance(self.agent, modelFreeAgent.ModelFreeAgent)):
            '''
            Training algorithm for Model Free Agents.
            '''
            min_epsilon, max_epsilon, decay_rate = self.agent.min_epsilon, self.agent.max_epsilon, self.agent.decay_rate
            epsilon = max_epsilon
            for episode in range(int(total_episodes)):
                self.environment.reset()
                for step in range(int(max_steps)):
                    old_state = self.environment.state
                    # Epsilon-greedy: exploit when the draw exceeds epsilon, else explore.
                    exp_exp_tradeoff = random.uniform(0, 1)
                    if exp_exp_tradeoff > epsilon:
                        action = self.agent.choose_action(old_state)
                    else:
                        action = self.environment.sample_action()
                    reward = self.environment.step(action)
                    loss = self.agent.remember(old_state, action, reward, self.environment.state, self.environment.done)
                    frame = self.environment.render()
                    if (self.cloudBridge is not None):
                        self.cloudBridge.submitStep(frame, epsilon, reward, loss)
                    modelState = Model.State(frame, epsilon, reward, loss)
                    message = Model.Message(Model.Message.STATE, modelState)
                    messageQueue.put(message)
                    if self.environment.done or self.isHalted:
                        break
                self.agent.apply_hindsight()
                if (self.cloudBridge is not None):
                    self.cloudBridge.submitEpisode(episode, int(total_episodes))
                message = Model.Message(Model.Message.EVENT, Model.Message.EPISODE)
                messageQueue.put(message)
                # Exponentially decay epsilon toward min_epsilon over episodes.
                epsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate * episode)
                if self.isHalted:
                    self.isHalted = False
                    break
        elif (isinstance(self.agent, modelBasedAgent.ModelBasedAgent)):
            '''
            Training algorithm for Model Based Agents.
            '''
            for episode in range(int(total_episodes)):
                # Reset the environment.
                state = self.environment.reset()
                # Evaluate the policy
                episode_trajectory = []
                loss = 0.0
                # CEM evaluates multiple policies.
                if (isinstance(self.agent, cem.CEM)):
                    for policy in self.agent.get_sample_policies():
                        # Reset the environment.
                        state = self.environment.reset()
                        # Sum of total policy rewards for this episode.
                        policy_trajectory = []
                        # Execute this episode for each policy.
                        for step in range(int(max_steps)):
                            # Execute one step.
                            old_state = self.environment.state
                            action = self.agent.choose_action(old_state, policy)
                            reward = self.environment.step(action)
                            # Add the reward to the total policy reward
                            policy_trajectory.append(TransitionFrame(old_state, action, reward, self.environment.state, self.environment.done))
                            # Render and save the step.
                            frame = self.environment.render()
                            if (self.cloudBridge is not None):
                                self.cloudBridge.submitStep(frame, 0, reward, 0)
                            # Send the state from the step.
                            modelState = Model.State(frame, None, reward, None)
                            message = Model.Message(Model.Message.STATE, modelState)
                            messageQueue.put(message)
                            if self.environment.done or self.isHalted:
                                break
                        # Add the policy rewards to the episode rewards.
                        episode_trajectory.append(policy_trajectory)
                        if self.isHalted:
                            break
                    # Update the agent only if all policies were evaluated.
                    if (len(episode_trajectory) == len(self.agent.get_sample_policies())):
                        loss = self.agent.update(episode_trajectory)
                else:
                    # Execute this episode for each policy.
                    for step in range(int(max_steps)):
                        # Execute one step.
                        old_state = self.environment.state
                        action = self.agent.choose_action(old_state)
                        reward = self.environment.step(action)
                        # Add the reward to the total policy reward
                        episode_trajectory.append(TransitionFrame(old_state, action, reward, self.environment.state, self.environment.done))
                        # Render and save the step.
                        frame = self.environment.render()
                        if (self.cloudBridge is not None):
                            self.cloudBridge.submitStep(frame, 0, reward, 0)
                        # Send the state from the step.
                        modelState = Model.State(frame, None, reward, None)
                        message = Model.Message(Model.Message.STATE, modelState)
                        messageQueue.put(message)
                        if self.environment.done or self.isHalted:
                            break
                    # Improve the Policy
                    loss = self.agent.update(episode_trajectory)
                # Send the loss of this episode.
                modelState = Model.State(None, None, 0, loss)
                message = Model.Message(Model.Message.STATE, modelState)
                messageQueue.put(message)
                if (self.cloudBridge is not None):
                    self.cloudBridge.submitEpisode(episode, int(total_episodes))
                message = Model.Message(Model.Message.EVENT, Model.Message.EPISODE)
                messageQueue.put(message)
                if self.isHalted:
                    self.isHalted = False
                    break
        if (self.cloudBridge is not None):
            self.cloudBridge.submitTrainFinish()
        message = Model.Message(Model.Message.EVENT, Model.Message.TRAIN_FINISHED)
        messageQueue.put(message)
        self.isRunning = False
        print('learning done')

    def run_testing(self, messageQueue, total_episodes, max_steps, *model_args):
        """Evaluate the current (or freshly loaded) agent without training it.

        Same message protocol as run_learning, but ends with TEST_FINISHED.
        Returns early (no messages) if there is neither an agent nor a
        pending load file.
        """
        # Round the episode/step counts to the nearest integer.
        total_episodes = int(total_episodes + 0.5)
        max_steps = int(max_steps + 0.5)
        self.isRunning = True
        if (self.cloudBridge is not None):
            self.cloudBridge.refresh()
            self.cloudBridge.setState("Testing")
        if not self.environment:
            self.environment = self.environment_class()
        if self.loadFilename:
            self.agent = self.agent_class(self.environment.state_size, self.environment.action_size, *model_args)
            self.agent.load(self.loadFilename)
            self.loadFilename = None
        elif not self.agent:
            return
        if self.agent:
            if (isinstance(self.agent, modelFreeAgent.ModelFreeAgent)):
                '''
                Testing algorithm for Model Free Agents.
                '''
                # NOTE(review): testing still follows the epsilon-greedy
                # schedule, so early episodes act mostly at random — confirm
                # this is intended rather than pure-greedy evaluation.
                min_epsilon, max_epsilon, decay_rate = self.agent.min_epsilon, self.agent.max_epsilon, self.agent.decay_rate
                epsilon = max_epsilon
                for episode in range(int(total_episodes)):
                    self.environment.reset()
                    for step in range(int(max_steps)):
                        old_state = self.environment.state
                        exp_exp_tradeoff = random.uniform(0, 1)
                        if exp_exp_tradeoff > epsilon:
                            action = self.agent.choose_action(old_state)
                        else:
                            action = self.environment.sample_action()
                        reward = self.environment.step(action)
                        # DRQN needs its episode history filled even when testing.
                        if isinstance(self.agent, drqn.DRQN):
                            self.agent.addToMemory(old_state, action, reward, self.environment.state, episode, self.environment.done)
                        frame = self.environment.render()
                        if (self.cloudBridge is not None):
                            self.cloudBridge.submitStep(frame, 0, reward, 0)
                        # Send the state from the step.
                        modelState = Model.State(frame, None, reward, None)
                        message = Model.Message(Model.Message.STATE, modelState)
                        messageQueue.put(message)
                        if self.environment.done or self.isHalted:
                            break
                    if (self.cloudBridge is not None):
                        self.cloudBridge.submitEpisode(episode, int(total_episodes))
                    message = Model.Message(Model.Message.EVENT, Model.Message.EPISODE)
                    messageQueue.put(message)
                    epsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate * episode)
                    if self.isHalted:
                        self.isHalted = False
                        break
            elif (isinstance(self.agent, modelBasedAgent.ModelBasedAgent)):
                '''
                Testing algorithm for Model Based Agents.
                '''
                for episode in range(int(total_episodes)):
                    # Reset the environment.
                    self.environment.reset()
                    # Execute this episode.
                    for step in range(int(max_steps)):
                        # Execute one step.
                        old_state = self.environment.state
                        action = self.agent.choose_action(old_state)
                        reward = self.environment.step(action)
                        # Render the step
                        frame = self.environment.render()
                        if (self.cloudBridge is not None):
                            self.cloudBridge.submitStep(frame, 0, reward, 0)
                        modelState = Model.State(frame, None, reward, None)
                        message = Model.Message(Model.Message.STATE, modelState)
                        messageQueue.put(message)
                        if self.environment.done or self.isHalted:
                            break
                    message = Model.Message(Model.Message.EVENT, Model.Message.EPISODE)
                    messageQueue.put(message)
                    if self.isHalted:
                        self.isHalted = False
                        break
                    if (self.cloudBridge is not None):
                        self.cloudBridge.submitEpisode(episode, int(total_episodes))
        if (self.cloudBridge is not None):
            self.cloudBridge.submitTrainFinish()
        message = Model.Message(Model.Message.EVENT, Model.Message.TEST_FINISHED)
        messageQueue.put(message)
        print('testing done')
        self.isRunning = False

    def halt_learning(self):
        # Request that the current run stop; the loops reset isHalted
        # themselves when they observe it.
        if self.isRunning:
            self.isHalted = True
            if (self.cloudBridge is not None):
                self.cloudBridge.setState("Halted")
                self.cloudBridge.terminate()

    def reset(self):
        # Drop the current environment and agent; they will be rebuilt lazily.
        self.environment = None
        self.agent = None

    def save(self, filename):
        # Persist the current agent, if any.
        if self.agent:
            self.agent.save(filename)

    def load(self, filename):
        # Defer the actual load until the next run_learning/run_testing call.
        self.loadFilename = filename

    class Message:
        """Envelope placed on the message queue: a type tag plus payload."""
        # types of message
        STATE = 0
        EVENT = 1
        # event types
        TRAIN_FINISHED = 0
        TEST_FINISHED = 1
        EPISODE = 2

        def __init__(self, type, data):
            self.type = type
            self.data = data

    class State:
        """Per-step snapshot: rendered frame, epsilon, reward and loss.

        Any field may be None when not applicable (e.g. epsilon/loss for
        model-based agents, or frame for loss-only updates).
        """
        def __init__(self, image, epsilon, reward, loss):
            self.image = image
            self.epsilon = epsilon
            self.reward = reward
            self.loss = loss
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,602 | RobertCordingly/easyRL-v0 | refs/heads/master | /Environments/pendulumEnv.py | from Environments import classicControlEnv
import gym, random
from PIL import Image, ImageDraw
import numpy as np
import math
class PendulumEnv(classicControlEnv.ClassicControlEnv):
    """Pendulum-v0 wrapper that discretises the continuous torque action and
    renders frames with PIL instead of gym's viewer."""
    displayName = 'Pendulum'

    def __init__(self):
        self.env = gym.make('Pendulum-v0')
        # Pendulum's native action space is continuous; expose 10 evenly
        # spaced torque values so discrete-action agents can drive it.
        self.action_size = 10
        self.action_low = self.env.action_space.low[0]
        self.action_high = self.env.action_space.high[0]
        self.action_range = self.action_high - self.action_low
        self.action_tick = self.action_range / (self.action_size - 1)
        self.state_size = self.env.observation_space.shape

    def step(self, action):
        # Map discrete index 0..action_size-1 onto [action_low, action_high].
        action = [self.action_low + action * self.action_tick]
        return super().step(action)

    def sample_action(self):
        # Uniform random discrete action index.
        return random.randrange(self.action_size)

    def render(self):
        """Draw the current pendulum pose to a PIL RGB image.

        Returns None when the environment has no state yet. state[0] is used
        as the rotation angle (Pendulum-v0 keeps [theta, theta_dot] in
        env.state). Drawing happens y-up and the image is flipped at the end.
        """
        if self.env.state is None: return None
        screen_width = 500
        screen_height = 500
        state = self.env.state
        # The "cart" here is just a fixed pivot block at the screen centre.
        cartx = screen_width / 2.0
        carty = screen_height / 2.0
        polewidth = 10.0
        polelen = 200
        cartwidth = 50.0
        cartheight = 30.0
        image = Image.new('RGB', (screen_width, screen_height), 'white')
        draw = ImageDraw.Draw(image)
        # Pivot block.
        l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
        axleoffset = cartheight / 4.0
        cartPoints = [(cartx + l, carty + b), (cartx + l, carty + t), (cartx + r, carty + t), (cartx + r, carty + b)]
        draw.polygon(cartPoints, fill='black')
        # Pendulum rod, rotated about the axle by the angle state[0].
        l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
        t, b = t + axleoffset, b + axleoffset
        l, r, t, b = cartx + l, cartx + r, carty + t, carty + b
        polePoints = [(l, b), (l, t), (r, t), (r, b)]
        for i, (x, y) in enumerate(polePoints):
            x -= cartx
            y -= carty + axleoffset
            x, y = x * math.cos(state[0]) + y * math.sin(state[0]), -x * math.sin(state[0]) + y * math.cos(state[0])
            x += cartx
            y += carty + axleoffset
            polePoints[i] = x, y
        draw.polygon(polePoints, fill=(204, 153, 102))
        # Axle disc. Fixed: the blue channel was 284, which is outside the
        # valid 0-255 range for an RGB image; gym's renderer colours the axle
        # (.5, .5, .8), which maps to (127, 127, 204).
        draw.chord([cartx - polewidth / 2, carty + axleoffset - polewidth / 2, cartx + polewidth / 2, carty + axleoffset + polewidth / 2], 0, 360, fill=(127, 127, 204))
        # Horizontal reference line through the pivot.
        draw.line([(0, carty), (screen_width, carty)], fill='black')
        # Drawing used y-up coordinates; flip vertically into image space.
        return image.transpose(method=Image.FLIP_TOP_BOTTOM)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,603 | RobertCordingly/easyRL-v0 | refs/heads/master | /lambda/python_template/src/handler.py |
import paramiko
import boto3
import time
from Inspector import *
import logging
import json
import os
import sys
import random

# Make modules that sit next to this handler importable when AWS Lambda runs it.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))

#
# Configure options here..
#
awsRegion = 'us-east-1'  # AWS region all backend resources live in
backendAMI = 'ami-0bd8cfaa7944aedfe'  # AMI used to launch backend EC2 instances
githubDefaultRepo = "https://github.com/RobertCordingly/easyRL-v0"  # repo cloned onto the instance
githubDefaultBranch = "dev/rl"  # branch checked out by default
#
# To add a new agent define the information for it here. The index value must corespond to the index used in
# terminal view. After added to agentList, define agent hyper parameter order in paraMap and if there are new
# hyper parameter values add them to paramConditions.
#
# Catalogue of selectable agents. "index" must match the index used in the
# terminal view, and "supportedEnvs" restricts which environment types
# (see envList "type") each agent may be paired with.
agentList = [
    {"name": "Q Learning", "description": "Basic Q Learning.", "index": "1", "supportedEnvs": ["singleDimDescrete"]},
    {"name": "SARSA", "description": "State Action Reward State Action learning.", "index": "2", "supportedEnvs": ["singleDimDescrete"]},
    {"name": "Deep Q (SRB)", "description": "Deep Q Learning using the standard replay buffer.", "index": "3", "supportedEnvs": ["singleDim", "singleDimDescrete"]},
    {"name": "Deep Q (PRB)", "description": "Deep Q Learning using a prioritized replay buffer.", "index": "4", "supportedEnvs": ["singleDim", "singleDimDescrete"]},
    {"name": "Deep Q (HER)", "description": "Deep Q Learning using a hindsight experience replay buffer.", "index": "5", "supportedEnvs": ["singleDim"]},
    {"name": "DRQN (SRB)", "description": "Deep Recurrent Q-Network using the standard replay buffer.", "index": "6", "supportedEnvs": ["singleDim", "singleDimDescrete", "atari"]},
    {"name": "DRQN (PRB)", "description": "Deep Recurrent Q-Network using a prioritized replay buffer.", "index": "7", "supportedEnvs": ["singleDim", "singleDimDescrete", "atari"]},
    {"name": "DRQN (HER)", "description": "Deep Recurrent Q-Network using a hindsight experience replay buffer.", "index": "8", "supportedEnvs": ["singleDim", "singleDimDescrete", "atari"]},
    {"name": "ADRQN (SRB)", "description": "Action-Specific Deep Recurrent Q-Network using the standard replay buffer.", "index": "9", "supportedEnvs": ["singleDim", "singleDimDescrete", "atari"]},
    # Fixed: the PRB variant's description previously said "standard replay
    # buffer" (copy-paste from the SRB entry above).
    {"name": "ADRQN (PRB)", "description": "Action-Specific Deep Recurrent Q-Network using a prioritized replay buffer.", "index": "10", "supportedEnvs": ["singleDim", "singleDimDescrete", "atari"]},
    {"name": "ADRQN (HER)", "description": "Action-Specific Deep Recurrent Q-Network using a hindsight experience replay buffer.", "index": "11", "supportedEnvs": ["singleDim", "singleDimDescrete", "atari"]},
    {"name": "NPG", "description": "Natural Policy Gradient.", "index": "12", "supportedEnvs": ["singleDim"]},
    {"name": "DDPG", "description": "Deep Deterministic Policy Gradient Learning.", "index": "13", "supportedEnvs": ["singleDim"]},
    {"name": "CEM", "description": "Cross Entropy Method Learning.", "index": "14", "supportedEnvs": ["singleDim"]},
    {"name": "SAC", "description": "Soft Actor Critic Learning.", "index": "15", "supportedEnvs": ["singleDim"]},
    {"name": "TRPO", "description": "Trust Region Policy Optimization.", "index": "16", "supportedEnvs": ["singleDim"]},
    {"name": "Rainbow", "description": "Reinforcement learning with the Rainbow agent.", "index": "17", "supportedEnvs": ["singleDim"]}
]
# Ordered hyper-parameter names expected by each agent's constructor,
# keyed by the agent "index" from agentList above.
paraMap = {
    '1': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'alpha'],  # Q Learning
    '2': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'alpha'],  # SARSA
    '3': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval'],  # Deep Q (SRB)
    '4': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'alpha'],  # Deep Q (PRB)
    '5': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval'],  # Deep Q (HER)
    '6': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'historyLength'],  # DRQN (SRB)
    '7': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'historyLength', 'alpha'],  # DRQN (PRB)
    '8': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'historyLength'],  # DRQN (HER)
    '9': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'historyLength'],  # ADRQN (SRB)
    '10': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'historyLength', 'alpha'],  # ADRQN (PRB)
    '11': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'historyLength'],  # ADRQN (HER)
    '12': ['episodes', 'steps', 'gamma', 'delta'],  # NPG
    '13': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'tau'],  # DDPG
    '14': ['episodes', 'steps', 'gamma', 'sigma', 'population', 'elite'],  # CEM
    '15': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'tau', 'temperature'],  # SAC
    '16': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'policyLearnRate', 'valueLearnRate', 'horizon', 'epochSize', 'ppoEpsilon', 'ppoLambda', 'valueLearnRatePlus'],  # TRPO
    '17': ['episodes', 'steps', 'gamma', 'minEpsilon', 'maxEpsilon', 'decayRate', 'batchSize', 'memorySize', 'targetInterval', 'learningRate']  # Rainbow
}
# Specs and on-demand hourly cost (USD) for the EC2 instance types offered
# as training backends. "ram" is in GiB.
instanceInfo = {
    "c4.large": {
        "cost": 0.1,
        "vcpus": 2,
        "gpus": 0,
        "ram": 3.75,
        "network": "Moderate"
    },
    "c4.xlarge": {
        "cost": 0.19,
        "vcpus": 4,
        "gpus": 0,
        "ram": 7.5,
        "network": "High"
    },
    "c4.2xlarge": {
        "cost": 0.39,
        "vcpus": 8,
        "gpus": 0,
        "ram": 15,
        "network": "High"
    },
    "c4.4xlarge": {
        "cost": 0.79,
        "vcpus": 16,
        "gpus": 0,
        "ram": 30,
        "network": "High"
    },
    "c4.8xlarge": {
        "cost": 1.59,
        "vcpus": 36,
        "gpus": 0,
        "ram": 60,
        "network": "10 Gigabit"
    },
    "g4dn.xlarge": {
        "cost": 0.52,
        "vcpus": 4,
        "gpus": 1,
        "ram": 16,
        "network": "25 Gigabit"
    },
    "g4dn.2xlarge": {
        "cost": 0.75,
        "vcpus": 8,
        "gpus": 1,
        "ram": 32,
        "network": "25 Gigabit"
    },
    "g4dn.4xlarge": {
        "cost": 1.20,
        "vcpus": 16,
        # Fixed: this previously said 0 GPUs; every g4dn size up to 8xlarge
        # has exactly 1 NVIDIA T4 GPU (see AWS G4dn instance documentation).
        "gpus": 1,
        "ram": 64,
        "network": "25 Gigabit"
    },
    "g4dn.8xlarge": {
        "cost": 2.17,
        "vcpus": 32,
        "gpus": 1,
        "ram": 128,
        "network": "50 Gigabit"
    },
}
# Index the agent catalogue by its string "index" for O(1) lookup.
agentMap = {agent["index"]: agent for agent in agentList}
# Catalogue of selectable environments. "index" must match the index used in
# the terminal view; "type" is matched against each agent's supportedEnvs.
envList = [
    {"name": "Cart Pole", "description": "Gain reward by balancing the pole as long as possible.", "index": "1", "type": "singleDim"},
    {"name": "Cart Pole Discrete", "description": "Balance the pole using descrete values instead.", "index": "2", "type": "singleDimDescrete"},
    {"name": "Frozen Lake", "description": "Navigate the frozen lake without falling in!", "index": "3", "type": "singleDimDescrete"},
    {"name": "Pendulum", "description": "Swing the pendulum to gain rewards.", "index": "4", "type": "singleDim"},
    {"name": "Acrobot", "description": "Swing the arm of the robot as high as possible to maximize rewards.", "index": "5", "type": "singleDim"},
    {"name": "Mountain Car", "description": "Drive the car up the mountains to gain reward.", "index": "6", "type": "singleDim"},
    # Atari environments (indices 7-67) all share the same description.
    {"name": "Adventure", "description": "A classic atari game. Score points to gain rewards.", "index": "7", "type": "atari"},
    {"name": "Air Raid", "description": "A classic atari game. Score points to gain rewards.", "index": "8", "type": "atari"},
    {"name": "Alien", "description": "A classic atari game. Score points to gain rewards.", "index": "9", "type": "atari"},
    {"name": "Amidar", "description": "A classic atari game. Score points to gain rewards.", "index": "10", "type": "atari"},
    {"name": "Assault", "description": "A classic atari game. Score points to gain rewards.", "index": "11", "type": "atari"},
    {"name": "Asterix", "description": "A classic atari game. Score points to gain rewards.", "index": "12", "type": "atari"},
    {"name": "Asteroids", "description": "A classic atari game. Score points to gain rewards.", "index": "13", "type": "atari"},
    {"name": "Atlantis", "description": "A classic atari game. Score points to gain rewards.", "index": "14", "type": "atari"},
    {"name": "Bank Heist", "description": "A classic atari game. Score points to gain rewards.", "index": "15", "type": "atari"},
    {"name": "Battle Zone", "description": "A classic atari game. Score points to gain rewards.", "index": "16", "type": "atari"},
    {"name": "Beam Rider", "description": "A classic atari game. Score points to gain rewards.", "index": "17", "type": "atari"},
    {"name": "Berzerk", "description": "A classic atari game. Score points to gain rewards.", "index": "18", "type": "atari"},
    {"name": "Bowling", "description": "A classic atari game. Score points to gain rewards.", "index": "19", "type": "atari"},
    {"name": "Boxing", "description": "A classic atari game. Score points to gain rewards.", "index": "20", "type": "atari"},
    {"name": "Breakout", "description": "A classic atari game. Score points to gain rewards.", "index": "21", "type": "atari"},
    {"name": "Carnival", "description": "A classic atari game. Score points to gain rewards.", "index": "22", "type": "atari"},
    {"name": "Centipede", "description": "A classic atari game. Score points to gain rewards.", "index": "23", "type": "atari"},
    {"name": "Chopper Command", "description": "A classic atari game. Score points to gain rewards.", "index": "24", "type": "atari"},
    {"name": "Crazy Climber", "description": "A classic atari game. Score points to gain rewards.", "index": "25", "type": "atari"},
    {"name": "Demon Attack", "description": "A classic atari game. Score points to gain rewards.", "index": "26", "type": "atari"},
    {"name": "Double Dunk", "description": "A classic atari game. Score points to gain rewards.", "index": "27", "type": "atari"},
    {"name": "Elevator Action", "description": "A classic atari game. Score points to gain rewards.", "index": "28", "type": "atari"},
    {"name": "Enduro", "description": "A classic atari game. Score points to gain rewards.", "index": "29", "type": "atari"},
    {"name": "Fishing Derby", "description": "A classic atari game. Score points to gain rewards.", "index": "30", "type": "atari"},
    {"name": "Freeway", "description": "A classic atari game. Score points to gain rewards.", "index": "31", "type": "atari"},
    {"name": "Frostbite", "description": "A classic atari game. Score points to gain rewards.", "index": "32", "type": "atari"},
    {"name": "Gopher", "description": "A classic atari game. Score points to gain rewards.", "index": "33", "type": "atari"},
    {"name": "Gravitar", "description": "A classic atari game. Score points to gain rewards.", "index": "34", "type": "atari"},
    {"name": "Hero", "index": "35", "description": "A classic atari game. Score points to gain rewards.", "type": "atari"},
    {"name": "Ice Hockey", "description": "A classic atari game. Score points to gain rewards.", "index": "36", "type": "atari"},
    {"name": "Jamesbond", "description": "A classic atari game. Score points to gain rewards.", "index": "37", "type": "atari"},
    {"name": "Journey Escape", "description": "A classic atari game. Score points to gain rewards.", "index": "38", "type": "atari"},
    {"name": "Kangaroo", "description": "A classic atari game. Score points to gain rewards.", "index": "39", "type": "atari"},
    {"name": "Krull", "description": "A classic atari game. Score points to gain rewards.", "index": "40", "type": "atari"},
    {"name": "Kung Fu Master", "description": "A classic atari game. Score points to gain rewards.", "index": "41", "type": "atari"},
    {"name": "Montezuma Revenge", "description": "A classic atari game. Score points to gain rewards.", "index": "42", "type": "atari"},
    {"name": "Ms. Pacman", "description": "A classic atari game. Score points to gain rewards.", "index": "43", "type": "atari"},
    {"name": "Name this Game", "description": "A classic atari game. Score points to gain rewards.", "index": "44", "type": "atari"},
    {"name": "Phoenix", "description": "A classic atari game. Score points to gain rewards.", "index": "45", "type": "atari"},
    {"name": "Pitfall", "description": "A classic atari game. Score points to gain rewards.", "index": "46", "type": "atari"},
    {"name": "Pong", "description": "A classic atari game. Score points to gain rewards.", "index": "47", "type": "atari"},
    {"name": "Pooyan", "description": "A classic atari game. Score points to gain rewards.", "index": "48", "type": "atari"},
    {"name": "Private Eye", "description": "A classic atari game. Score points to gain rewards.", "index": "49", "type": "atari"},
    {"name": "QBert", "description": "A classic atari game. Score points to gain rewards.", "index": "50", "type": "atari"},
    {"name": "River Raid", "description": "A classic atari game. Score points to gain rewards.", "index": "51", "type": "atari"},
    {"name": "Road Runner", "description": "A classic atari game. Score points to gain rewards.", "index": "52", "type": "atari"},
    {"name": "RoboTank", "description": "A classic atari game. Score points to gain rewards.", "index": "53", "type": "atari"},
    {"name": "SeaQuest", "description": "A classic atari game. Score points to gain rewards.", "index": "54", "type": "atari"},
    {"name": "Skiing", "description": "A classic atari game. Score points to gain rewards.", "index": "55", "type": "atari"},
    {"name": "Solaris", "description": "A classic atari game. Score points to gain rewards.", "index": "56", "type": "atari"},
    {"name": "Space Invaders", "description": "A classic atari game. Score points to gain rewards.", "index": "57", "type": "atari"},
    {"name": "Star Gunner", "description": "A classic atari game. Score points to gain rewards.", "index": "58", "type": "atari"},
    {"name": "Tennis", "description": "A classic atari game. Score points to gain rewards.", "index": "59", "type": "atari"},
    {"name": "Time Pilot", "description": "A classic atari game. Score points to gain rewards.", "index": "60", "type": "atari"},
    {"name": "Tutankham", "description": "A classic atari game. Score points to gain rewards.", "index": "61", "type": "atari"},
    {"name": "Up N Down", "description": "A classic atari game. Score points to gain rewards.", "index": "62", "type": "atari"},
    {"name": "Venture", "description": "A classic atari game. Score points to gain rewards.", "index": "63", "type": "atari"},
    {"name": "Video Pinball", "description": "A classic atari game. Score points to gain rewards.", "index": "64", "type": "atari"},
    {"name": "Wizard of Wor", "description": "A classic atari game. Score points to gain rewards.", "index": "65", "type": "atari"},
    {"name": "Yars Revenge", "description": "A classic atari game. Score points to gain rewards.", "index": "66", "type": "atari"},
    {"name": "Zaxxon", "description": "A classic atari game. Score points to gain rewards.", "index": "67", "type": "atari"}
]
# Lookup table keyed by the environment's string index ("1", "2", ...)
# so handlers can resolve an environment descriptor in O(1).
envMap = {descriptor['index']: descriptor for descriptor in envList}
# Validation metadata for every hyperparameter the agents accept.
# Each entry maps a hyperparameter key (as it appears in the request
# 'arguments' dict) to: a display name, a help description, the inclusive
# [min, max] range enforced before a job is started, a default value, and
# UI hints (showSlider / stepSize) for the web front end.
paramConditions = {
    "episodes": {
        "name": "Number of Episodes",
        "description": "The number of episodes to train the agent.",
        "min": 1,
        "max": 1000000000,
        "default": 1000,
        "showSlider": False
    },
    "steps": {
        "name": "Max Size",
        "description": "The max number of timesteps permitted in an episode.",
        "min": 1,
        "max": 1000000000,
        "default": 200,
        "showSlider": False
    },
    "gamma": {
        "name": "Gamma",
        "description": "The factor by which to discount future rewards.",
        "min": 0,
        "max": 1,
        "default": 0.97,
        "showSlider": True,
        "stepSize": 0.01
    },
    "minEpsilon": {
        "name": "Min Epsilon",
        "description": "The minimum probability that the model will select a random action over its desired one.",
        "min": 0,
        "max": 1,
        "default": 0.1,
        "showSlider": True,
        "stepSize": 0.01
    },
    "maxEpsilon": {
        "name": "Max Epsilon",
        "description": "The maximum/starting probability that the model will select a random action over its desired one.",
        "min": 0,
        "max": 1,
        "default": 1,
        "showSlider": True,
        "stepSize": 0.01
    },
    "decayRate": {
        "name": "Decay Rate",
        "description": "The amount to decrease epsilon by each timestep.",
        "min": 0,
        "max": 0.2,
        "default": 0.018,
        "showSlider": True,
        "stepSize": 0.001
    },
    "batchSize": {
        "name": "Batch Size",
        "description": "The number of transitions to consider simultaneously when updating the agent.",
        "min": 1,
        "max": 256,
        "default": 32,
        "showSlider": True,
        "stepSize": 1
    },
    "memorySize": {
        "name": "Memory Size",
        "description": "The maximum number of timestep transitions to keep stored.",
        "min": 1,
        "max": 655360,
        "default": 1000,
        "showSlider": False
    },
    "targetInterval": {
        "name": "Target Update Interval",
        "description": "The distance in timesteps between target model updates.",
        "min": 1,
        "max": 100000,
        "default": 200,
        "showSlider": False
    },
    "historyLength": {
        "name": "History Length",
        "description": "",
        "min": 0,
        "max": 20,
        "default": 10,
        "showSlider": True,
        "stepSize": 1
    },
    "alpha": {
        "name": "Learning Rate",
        "description": "The rate at which the parameters respond to environment observations.",
        "min": 0,
        "max": 1,
        "default": 0.18,
        "showSlider": True,
        "stepSize": 0.01
    },
    "delta": {
        "name": "Delta",
        "description": "The normalized step size for computing the learning rate.",
        "min": 0,
        "max": 0.05,
        "default": 0.001,
        "showSlider": True,
        "stepSize": 0.0001
    },
    "sigma": {
        "name": "Sigma",
        "description": "The standard deviation of additive noise.",
        "min": 0.001,
        "max": 1,
        "default": 0.5,
        "showSlider": True,
        "stepSize": 0.001
    },
    "population": {
        "name": "Population Size",
        "description": "The size of the sample population.",
        "min": 0,
        "max": 100,
        "default": 10,
        "showSlider": True,
        "stepSize": 1
    },
    "elite": {
        "name": "Elite Fraction",
        "description": "The proportion of the elite to consider for policy improvement.",
        "min": 0.001,
        "max": 1,
        "default": 0.2,
        "showSlider": True,
        "stepSize": 0.001
    },
    "tau": {
        "name": "Tau",
        "description": "",
        "min": 0,
        "max": 1,
        "default": 0.97,
        "showSlider": True,
        "stepSize": 0.001
    },
    "temperature": {
        "name": "Temperature",
        "description": "",
        "min": 0,
        "max": 1,
        "default": 0.97,
        "showSlider": True,
        "stepSize": 0.001
    },
    "learningRate": {
        "name": "Learning Rate",
        "description": "",
        "min": 0.0001,
        "max": 1,
        "default": 0.001,
        "showSlider": True,
        "stepSize": 0.001
    },
    "policyLearnRate": {
        "name": "Policy Learning Rate",
        "description": "",
        "min": 0.0001,
        "max": 1,
        "default": 0.001,
        "showSlider": True,
        "stepSize": 0.001
    },
    "valueLearnRate": {
        "name": "Value Learning Rate",
        "description": "",
        "min": 0.0001,
        "max": 1,
        "default": 0.001,
        "showSlider": True,
        "stepSize": 0.001
    },
    "horizon": {
        "name": "Horizon",
        "description": "",
        "min": 10,
        "max": 10000,
        "default": 50,
        "showSlider": True,
        "stepSize": 0.001
    },
    "epochSize": {
        "name": "Epoch Size",
        "description": "",
        "min": 10,
        "max": 100000,
        "default": 500,
        "showSlider": True,
        "stepSize": 0.001
    },
    "ppoEpsilon": {
        "name": "PPO Epsilon",
        "description": "",
        "min": 0.0001,
        "max": 0.5,
        "default": 0.2,
        "showSlider": True,
        "stepSize": 0.0001
    },
    "ppoLambda": {
        "name": "PPO Lambda",
        "description": "",
        "min": 0.5,
        "max": 1,
        "default": 0.95,
        "showSlider": True,
        "stepSize": 0.01
    },
    "valueLearnRatePlus": {
        "name": "Value Learning Rate+",
        "description": "",
        "min": 0.0001,
        "max": 1,
        "default": 0.001,
        "showSlider": True,
        "stepSize": 0.001
    }
}
def listInstances(ec2Client, inspector):
    """Return every EC2 instance visible to *ec2Client* as a flat list.

    Flattens the Reservations/Instances nesting of describe_instances().
    ``inspector`` is accepted for interface symmetry with the other
    helpers but is not used here.
    """
    described = ec2Client.describe_instances()
    return [
        inst
        for reservation in described["Reservations"]
        for inst in reservation["Instances"]
    ]
def findOurInstance(ec2Client, jobID, inspector):
    """Return the pending/running instance tagged with this jobID, else None.

    Instances missing either a 'Tags' or 'State' field, or in any state
    other than 'pending'/'running', are ignored.
    """
    wanted = str(jobID)
    for candidate in listInstances(ec2Client, inspector):
        if 'Tags' not in candidate or 'State' not in candidate:
            continue
        if candidate['State']['Name'] not in ('pending', 'running'):
            continue
        for tag in candidate['Tags']:
            if tag['Key'] == 'jobID' and tag['Value'] == wanted:
                return candidate
    return None
def createInstance(ec2Client, ec2Resource, jobID, arguments, inspector):
    """Launch one EC2 instance for this job, tagged with the jobID.

    First ensures an 'easyrlsecurity' security group exists with HTTP (80)
    and SSH (22) open to 0.0.0.0/0, then launches a single instance of
    arguments['instanceType'] from the module-level ``backendAMI`` image.
    Records a "message" attribute on the inspector when done.
    """
    try:
        # Happy path: create the security group from scratch.
        response = ec2Client.create_security_group(
            GroupName='easyrlsecurity',
            Description='EasyRL Security Group',
        )
        security_group_id = response['GroupId']
        # Open HTTP and SSH to the whole IPv4 internet.
        data = ec2Client.authorize_security_group_ingress(
            GroupId=security_group_id,
            IpPermissions=[
                {'IpProtocol': 'tcp',
                 'FromPort': 80,
                 'ToPort': 80,
                 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
                {'IpProtocol': 'tcp',
                 'FromPort': 22,
                 'ToPort': 22,
                 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}
            ])
        #inspector.addAttribute("securityGroupData", str(data))
    except:
        # Creation failed -- presumably because the group already exists --
        # so look it up by name and reuse its id.
        # NOTE(review): the bare except also swallows auth/network errors;
        # consider narrowing to botocore's ClientError.
        group_name = 'easyrlsecurity'
        response = ec2Client.describe_security_groups(
            Filters=[
                dict(Name='group-name', Values=[group_name])
            ]
        )
        security_group_id = response['SecurityGroups'][0]['GroupId']
        #inspector.addAttribute("securityGroupId", str(security_group_id))
    # Tag the instance with the jobID so findOurInstance() can locate it.
    instance = ec2Resource.create_instances(
        ImageId=backendAMI,
        MinCount=1,
        MaxCount=1,
        InstanceType=arguments['instanceType'],
        SecurityGroupIds=[security_group_id],
        TagSpecifications=[{
            "ResourceType": "instance",
            "Tags": [
                {
                    'Key': 'jobID',
                    'Value': str(jobID)
                }
            ]
        }]
    )
    inspector.addAttribute("message", "created instance")
def terminateInstance(ec2Client, ec2Resource, ourInstance, inspector):
    """Terminate the EC2 instance described by *ourInstance*.

    Records "message" on success, or "error" when *ourInstance* is None.
    ``ec2Client`` is unused but kept for interface symmetry with the
    other helpers.
    """
    if ourInstance is None:
        inspector.addAttribute("error", "Instance not found.")
        return
    ec2Resource.Instance(ourInstance['InstanceId']).terminate()
    inspector.addAttribute("message", "terminated instance")
def yourFunction(request, context):
    """Lambda entry point bridging the web front end to the EC2 training host.

    Dispatches on request['task']:
      - "poll": ensure the job's instance exists, is SSH-reachable, has the
        repo cloned and the right instance type, and report job progress.
      - "runJob" / "runTest": validate hyperparameters, then drive the
        EasyRL terminal UI over SSH to start training / evaluation.
      - "haltJob": kill the running python process on the instance.
      - "exportModel" / "import" / "jobLog": move the model file or job log
        between the instance and the job's S3 bucket.
      - "terminateInstance": empty the job bucket and kill the instance.
      - "info": return the environment/agent/hyperparameter catalogs.

    Returns the attribute dict built via the (module-level) Inspector.
    NOTE(review): relies on module globals defined earlier in the file
    (Inspector, backendAMI, awsRegion, instanceInfo, paraMap, agentMap,
    agentList, githubDefaultRepo, githubDefaultBranch); SSH uses a fixed
    username/password baked into the AMI.
    """
    # Import the module and collect data
    inspector = Inspector()
    # Caller-supplied AWS credentials and job routing data.
    accessKey = request['accessKey']
    secretKey = request['secretKey']
    sessionToken = request['sessionToken']
    jobID = request['jobID']
    task = request['task']
    arguments = request['arguments']
    if ('episodes' in arguments):
        arguments['episodes'] += 1
    # An explicit instanceID is appended to the jobID so one user can run
    # several instances under distinct tags.
    instanceID = ""
    if ('instanceID' in arguments):
        instanceID = arguments['instanceID']
        if (instanceID is not None):
            jobID += str(instanceID)
    if ('gitHubURL' not in arguments):
        arguments['gitHubURL'] = githubDefaultRepo
        arguments['gitHubBranch'] = githubDefaultBranch
    continuousTraining = False
    if ("continuousTraining" in arguments):
        continuousTraining = arguments["continuousTraining"]
    # Filename used for the serialized agent on the instance and in S3.
    modelName = "model.bin"
    botoSession = boto3.Session(
        aws_access_key_id=accessKey,
        aws_secret_access_key=secretKey,
        aws_session_token=sessionToken,
        region_name=awsRegion
    )
    inspector.addAttribute("instanceStateText", "Loading...")
    if 'instanceType' in arguments:
        # Best effort: attach pricing/spec info for the chosen instance type.
        try:
            inspector.addAttribute("cost", instanceInfo[arguments['instanceType']]['cost'])
            inspector.addAttribute("info", instanceInfo[arguments['instanceType']])
        except:
            pass
    # ---- task: poll -------------------------------------------------------
    if (task == "poll"):
        ec2Client = botoSession.client('ec2')
        ec2Resource = botoSession.resource('ec2')
        s3Resource = botoSession.resource('s3')
        # Listing instances doubles as a credential check.
        try:
            ourInstance = findOurInstance(ec2Client, jobID, inspector)
            inspector.addAttribute("validCredentials", 1)
        except:
            inspector.addAttribute("validCredentials", 0)
            return inspector.finish()
        if (ourInstance is None):
            createInstance(ec2Client, ec2Resource, jobID, arguments, inspector)
            inspector.addAttribute("message", "creating instance")
            inspector.addAttribute("instanceState", "booting")
            inspector.addAttribute("instanceStateText", "Booting")
        else:
            # Check if it is ready to SSH...
            try:
                ip = ourInstance['PublicIpAddress']
                inspector.addAttribute("ip", ip)
                ssh = paramiko.SSHClient()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.connect(ip, username='tcss556', password='secretPassword')
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                    "echo test")
                stdout = ssh_stdout.readlines()
            except:
                inspector.addAttribute(
                    "error", "Problem creating ssh connection to " + str(ip) + " try again")
                return inspector.finish()
            if (stdout[0] == "test\n"):
                # SSH is up -- check whether the repo on the box is current.
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                    "cat easyRL-v0/lambda/version_check1.txt")
                instanceData = ssh_stdout.readlines()
                # Has the version check? If not update
                if (instanceData == []):
                    # Stale/missing checkout: park the old tree, schedule the
                    # self-kill timer, and clone a fresh copy of the repo.
                    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                        "mv easyRL-v0/ OLD" + str(random.randint(1,10000000)) + "/")
                    stdout = ssh_stdout.readlines()
                    if (sessionToken == ""):
                        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                            "sleep " + str(arguments['killTime']) + " && python3.7 easyRL-v0/lambda/killSelf.py " + jobID + " " + accessKey + " " + secretKey + " &")
                    else:
                        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                            "sleep " + str(arguments['killTime']) + " && python3.7 easyRL-v0/lambda/killSelf.py " + jobID + " " + accessKey + " " + secretKey + " " + sessionToken + " &")
                    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                        "git clone --branch " + arguments['gitHubBranch'] + " " + arguments['gitHubURL'])
                    stdout = ssh_stdout.readlines() # DO NOT REMOVE
                    stderr = ssh_stderr.readlines() # DO NOT REMOVE
                    # Record instanceType+killTime so later polls can detect a
                    # configuration change.
                    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                        "echo " + arguments['instanceType'] + str(arguments['killTime']) + " > tag.txt")
                    inspector.addAttribute("instanceState", "updated")
                    inspector.addAttribute("instanceStateText", "Cloned Repository")
                else:
                    # Instance type match the tag? If not reboot...
                    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                        "cat tag.txt")
                    instanceData = ssh_stdout.readlines()
                    tag = arguments['instanceType'] + str(arguments['killTime'])
                    if (instanceData == [] or tag not in instanceData[0]):
                        # Config changed: recycle instance and empty the bucket.
                        terminateInstance(
                            ec2Client, ec2Resource, ourInstance, inspector)
                        createInstance(ec2Client, ec2Resource,
                                       jobID, arguments, inspector)
                        try:
                            bucket = s3Resource.Bucket('easyrl-' + jobID)
                            bucket.objects.all().delete()
                        except:
                            pass
                        inspector.addAttribute('instanceState', "rebooting")
                        inspector.addAttribute("instanceStateText", "Recreating")
                    else:
                        # Is job running? If it is get progress. Else return idle.
                        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                            "ps -aux | grep EasyRL.py")
                        stdout = ssh_stdout.readlines()
                        results = ""
                        for line in stdout:
                            results += line
                        if ("terminal" in results):
                            inspector.addAttribute(
                                'instanceState', "runningJob")
                            inspector.addAttribute("instanceStateText", "Running Task")
                            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                                "cat ./arguments.json")
                            stdout = ssh_stdout.readlines()
                            #inspector.addAttribute("Test", str(stdout))
                            #return inspctor.finish()
                            if (stdout != []):
                                jobArguments = json.loads(stdout[0])
                                inspector.addAttribute(
                                    "jobArguments", jobArguments)
                                # Continuous training with new arguments: halt
                                # the current job so the next poll restarts it.
                                if continuousTraining and jobArguments != arguments:
                                    inspector.addAttribute('instanceState', "changingJob")
                                    inspector.addAttribute("instanceStateText", "Changing Task")
                                    task = "haltJob"
                            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                                "cat ./data.json")
                            stdout = ssh_stdout.readlines()
                            if (stdout != []):
                                try:
                                    inspector.addAttribute(
                                        "progress", json.loads(stdout[0]))
                                except:
                                    # data.json mid-write -- report "waiting".
                                    inspector.addAttribute("progress", "waiting")
                            else:
                                inspector.addAttribute("progress", "waiting")
                        else:
                            inspector.addAttribute('instanceState', "idle")
                            inspector.addAttribute("instanceStateText", "Idle")
                            if continuousTraining:
                                # Fall through to the runJob branch below.
                                task = "runJob"
                                inspector.addAttribute('instanceState', "startingJob")
                                inspector.addAttribute("instanceStateText", "Starting Task")
            else:
                inspector.addAttribute('instanceState', "initializing")
                inspector.addAttribute("instanceStateText", "Initializing")
            ssh.close()
    # ---- task: runJob -----------------------------------------------------
    if (task == "runJob"):
        ec2Client = botoSession.client('ec2')
        ec2Resource = botoSession.resource('ec2')
        s3Resource = botoSession.resource('s3')
        # Clear any artifacts from a previous run of this job.
        try:
            bucket = s3Resource.Bucket('easyrl-' + jobID)
            bucket.objects.all().delete()
        except:
            pass
        ourInstance = findOurInstance(ec2Client, jobID, inspector)
        if (ourInstance is not None):
            ip = ourInstance['PublicIpAddress']
            inspector.addAttribute("ip", str(ip))
            try:
                ssh = paramiko.SSHClient()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.connect(ip, username='tcss556', password='secretPassword')
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                    "ps -aux | grep EasyRL.py")
                stdout = ssh_stdout.readlines()
            except:
                inspector.addAttribute(
                    "error", "Problem creating ssh connection to " + str(ip) + " try again")
                return inspector.finish()
            results = ""
            for line in stdout:
                results += line
            # "terminal" appears in ps output only when EasyRL is running.
            if ("terminal" not in results):
                # Error Checking
                if (str(arguments['agent']) in paraMap):
                    missingAttributes = []
                    outOfRange = []
                    valid = True
                    envIndex = str(arguments['environment'])
                    agentIndex = str(arguments['agent'])
                    if envMap[envIndex]['type'] not in agentMap[agentIndex]['supportedEnvs']:
                        inspector.addAttribute("error", "Incompatible agent/environment pair!")
                        return inspector.finish()
                    # Validate every hyperparameter this agent requires
                    # against the paramConditions ranges.
                    for pp in paraMap[str(arguments['agent'])]:
                        pp = str(pp)
                        if pp not in arguments:
                            missingAttributes.append(pp)
                        else:
                            val = arguments[pp]
                            if (val < paramConditions[pp]['min'] or val > paramConditions[pp]['max']):
                                outOfRange.append(pp)
                    if len(missingAttributes) > 0:
                        inspector.addAttribute("error-Missing", "Missing hyperparameters for agent: " + str(missingAttributes))
                        valid = False
                    if len(outOfRange) > 0:
                        errorMessage = "Attributes with invalid value: "
                        for error in outOfRange:
                            errorMessage += error + " min: " + str(paramConditions[error]['min']) + " max: " + str(paramConditions[error]['max']) + " used: " + str(arguments[error]) + " "
                        inspector.addAttribute("error-Range", errorMessage)
                        valid = False
                    if (valid == False):
                        return inspector.finish()
                else:
                    inspector.addAttribute("error", "Unknown Agent " + str(arguments['agent']))
                    return inspector.finish()
                # Persist the arguments so polls can compare against them.
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                    "echo \'" + json.dumps(arguments) + "\' > arguments.json")
                stdout = ssh_stdout.readlines()
                # Build the scripted menu input for EasyRL's terminal UI:
                # environment, agent, "1"=train, hyperparameters, "4"=save
                # model, filename, "5"=exit.
                command = 'printf "'
                command += str(arguments['environment']) + '\n'
                command += str(arguments['agent']) + '\n'
                command += '1\n'
                paramList = paraMap[str(arguments['agent'])]
                for param in paramList:
                    command += str(arguments[param]) + '\n'
                command += '4\n'
                command += modelName + '\n'
                command += '5\n'
                if (sessionToken != ""):
                    command += '" | python3.7 ./easyRL-v0/EasyRL.py --terminal --secretKey ' + secretKey + \
                        ' --accessKey ' + accessKey + ' --sessionToken ' + \
                        sessionToken + ' --jobID ' + jobID
                else:
                    command += '" | python3.7 ./easyRL-v0/EasyRL.py --terminal --secretKey ' + \
                        secretKey + ' --accessKey ' + accessKey + ' --jobID ' + jobID
                command += ' &> lastJobLog.txt & sleep 1'
                #inspector.addAttribute("command", command)
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
                stdout = ssh_stdout.readlines()
                #inspector.addAttribute("stdout", stdout)
                ssh.close()
                inspector.addAttribute("message", "Job started")
            else:
                inspector.addAttribute("message", "Job already running")
        else:
            inspector.addAttribute('error', 'Instance not found.')
    # ---- task: runTest ----------------------------------------------------
    if (task == "runTest"):
        ec2Client = botoSession.client('ec2')
        ec2Resource = botoSession.resource('ec2')
        s3Resource = botoSession.resource('s3')
        try:
            bucket = s3Resource.Bucket('easyrl-' + jobID)
            bucket.objects.all().delete()
        except:
            pass
        ourInstance = findOurInstance(ec2Client, jobID, inspector)
        if (ourInstance is not None):
            ip = ourInstance['PublicIpAddress']
            inspector.addAttribute("ip", str(ip))
            try:
                ssh = paramiko.SSHClient()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.connect(ip, username='tcss556', password='secretPassword')
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                    "ps -aux | grep EasyRL.py")
                stdout = ssh_stdout.readlines()
            except:
                inspector.addAttribute(
                    "error", "Problem creating ssh connection to " + str(ip) + " try again")
                return inspector.finish()
            results = ""
            for line in stdout:
                results += line
            if ("terminal" not in results):
                # Error Checking (same validation as runJob).
                if (str(arguments['agent']) in paraMap):
                    missingAttributes = []
                    outOfRange = []
                    valid = True
                    envIndex = str(arguments['environment'])
                    agentIndex = str(arguments['agent'])
                    if envMap[envIndex]['type'] not in agentMap[agentIndex]['supportedEnvs']:
                        inspector.addAttribute("error", "Incompatible agent/environment pair!")
                        return inspector.finish()
                    for pp in paraMap[str(arguments['agent'])]:
                        pp = str(pp)
                        if pp not in arguments:
                            missingAttributes.append(pp)
                        else:
                            val = arguments[pp]
                            if (val < paramConditions[pp]['min'] or val > paramConditions[pp]['max']):
                                outOfRange.append(pp)
                    if len(missingAttributes) > 0:
                        inspector.addAttribute("error-Missing", "Missing hyperparameters for agent: " + str(missingAttributes))
                        valid = False
                    if len(outOfRange) > 0:
                        errorMessage = "Attributes with invalid value: "
                        for error in outOfRange:
                            errorMessage += error + " min: " + str(paramConditions[error]['min']) + " max: " + str(paramConditions[error]['max']) + " used: " + str(arguments[error]) + " "
                        inspector.addAttribute("error-Range", errorMessage)
                        valid = False
                    if (valid == False):
                        return inspector.finish()
                else:
                    inspector.addAttribute("error", "Unknown Agent " + str(arguments['agent']))
                    return inspector.finish()
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                    "echo \'" + json.dumps(arguments) + "\' > arguments.json")
                stdout = ssh_stdout.readlines()
                # md5sum only produces output if the model file exists.
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                    "md5sum " + modelName)
                instanceData = ssh_stdout.readlines()
                # Has the tag? If not update
                if (instanceData != []):
                    # Scripted menu input: environment, agent, "2"=load model,
                    # filename, "3"=test, hyperparameters, "5"=exit.
                    command = 'printf "'
                    command += str(arguments['environment']) + '\n'
                    command += str(arguments['agent']) + '\n'
                    command += '2\n'
                    command += modelName + '\n'
                    command += '3\n'
                    paramList = paraMap[str(arguments['agent'])]
                    for param in paramList:
                        command += str(arguments[param]) + '\n'
                    command += '5\n'
                    if (sessionToken != ""):
                        command += '" | python3.7 ./easyRL-v0/EasyRL.py --terminal --secretKey ' + secretKey + \
                            ' --accessKey ' + accessKey + ' --sessionToken ' + \
                            sessionToken + ' --jobID ' + jobID
                    else:
                        command += '" | python3.7 ./easyRL-v0/EasyRL.py --terminal --secretKey ' + \
                            secretKey + ' --accessKey ' + accessKey + ' --jobID ' + jobID
                    command += ' &> lastJobLog.txt & sleep 1'
                    #inspector.addAttribute("command", command)
                    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
                    stdout = ssh_stdout.readlines()
                    #inspector.addAttribute("stdout", stdout)
                    ssh.close()
                    inspector.addAttribute("message", "Test started")
                else:
                    ssh.close()
                    inspector.addAttribute("error", "No trained agent found")
            else:
                inspector.addAttribute("message", "Test already running")
        else:
            inspector.addAttribute('error', 'Instance not found.')
    # ---- task: haltJob ----------------------------------------------------
    if (task == "haltJob"):
        ec2Client = botoSession.client('ec2')
        ec2Resource = botoSession.resource('ec2')
        ourInstance = findOurInstance(ec2Client, jobID, inspector)
        if (ourInstance is not None):
            ip = ourInstance['PublicIpAddress']
            #inspector.addAttribute("ip", str(ip))
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(ip, username='tcss556', password='secretPassword')
            # Kills every python3.7 process on the box, including EasyRL.
            command = "pkill python3.7"
            #inspector.addAttribute("command", command)
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
            stdout = ssh_stdout.readlines()
            #inspector.addAttribute("stdout", stdout)
            ssh.close()
            inspector.addAttribute("message", "Job halted.")
        else:
            inspector.addAttribute("error", "Instance not found.")
    # ---- task: exportModel ------------------------------------------------
    if (task == "exportModel"):
        ec2Client = botoSession.client('ec2')
        ec2Resource = botoSession.resource('ec2')
        ourInstance = findOurInstance(ec2Client, jobID, inspector)
        if (ourInstance is not None):
            ip = ourInstance['PublicIpAddress']
            #inspector.addAttribute("ip", str(ip))
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(ip, username='tcss556', password='secretPassword')
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
                "md5sum " + modelName)
            instanceData = ssh_stdout.readlines()
            # md5sum output present => the model file exists on the instance.
            if (instanceData != []):
                if (sessionToken == ""):
                    command = "python3.7 easyRL-v0/lambda/upload.py " + modelName + " " + jobID + " " + accessKey + " " + secretKey
                else:
                    command = "python3.7 easyRL-v0/lambda/upload.py " + modelName + " " + jobID + " " + accessKey + " " + secretKey + " " + sessionToken
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
                stdout = ssh_stdout.readlines()
                inspector.addAttribute("url", "https://easyrl-" + str(jobID) + ".s3.amazonaws.com/" + modelName)
            else:
                inspector.addAttribute("error", "Model not trained yet!")
            ssh.close()
        else:
            inspector.addAttribute("error", "Instance not found.")
    # ---- task: import -----------------------------------------------------
    if (task == "import"):
        ec2Client = botoSession.client('ec2')
        ec2Resource = botoSession.resource('ec2')
        ourInstance = findOurInstance(ec2Client, jobID, inspector)
        if (ourInstance is not None):
            ip = ourInstance['PublicIpAddress']
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(ip, username='tcss556', password='secretPassword')
            if (sessionToken == ""):
                command = "python3.7 easyRL-v0/lambda/download.py " + modelName + " " + jobID + " " + accessKey + " " + secretKey
            else:
                command = "python3.7 easyRL-v0/lambda/download.py " + modelName + " " + jobID + " " + accessKey + " " + secretKey + " " + sessionToken
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
            stdout = ssh_stdout.readlines()
            # NOTE(review): download.py's stdout is reported under "error"
            # even on success -- presumably intentional, but worth confirming.
            inspector.addAttribute("error", stdout)
            ssh.close()
        else:
            inspector.addAttribute("error", "Instance not found.")
    # ---- task: jobLog -----------------------------------------------------
    if (task == "jobLog"):
        ec2Client = botoSession.client('ec2')
        ec2Resource = botoSession.resource('ec2')
        ourInstance = findOurInstance(ec2Client, jobID, inspector)
        if (ourInstance is not None):
            ip = ourInstance['PublicIpAddress']
            #inspector.addAttribute("ip", str(ip))
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(ip, username='tcss556', password='secretPassword')
            if (sessionToken == ""):
                command = "python3.7 easyRL-v0/lambda/upload.py lastJobLog.txt " + jobID + " " + accessKey + " " + secretKey
            else:
                command = "python3.7 easyRL-v0/lambda/upload.py lastJobLog.txt " + jobID + " " + accessKey + " " + secretKey + " " + sessionToken
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
            stdout = ssh_stdout.readlines()
            inspector.addAttribute("url", "https://easyrl-" + str(jobID) + ".s3.amazonaws.com/lastJobLog.txt")
            ssh.close()
        else:
            inspector.addAttribute("error", "Instance not found.")
    # ---- task: terminateInstance ------------------------------------------
    if (task == "terminateInstance"):
        ec2Client = botoSession.client('ec2')
        ec2Resource = botoSession.resource('ec2')
        s3Resource = botoSession.resource('s3')
        # Empty the job bucket before killing the instance.
        try:
            bucket = s3Resource.Bucket('easyrl-' + jobID)
            bucket.objects.all().delete()
        except:
            pass
        ourInstance = findOurInstance(ec2Client, jobID, inspector)
        terminateInstance(ec2Client, ec2Resource, ourInstance, inspector)
    # ---- task: info -------------------------------------------------------
    if (task == "info"):
        inspector.addAttribute("environments", envList)
        inspector.addAttribute("environmentsMap", envMap)
        inspector.addAttribute("parameters", paramConditions)
        # Attach each agent's hyperparameter list to its catalog entry.
        combinedAgents = []
        for agent in agentList:
            agent['parameters'] = paraMap[agent['index']]
            combinedAgents.append(agent)
        combinedAgentsMap = {}
        for aa in combinedAgents:
            combinedAgentsMap[aa['index']] = aa
        inspector.addAttribute("agents", combinedAgents)
        inspector.addAttribute("agentsMap", combinedAgentsMap)
    return inspector.finish()
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,604 | RobertCordingly/easyRL-v0 | refs/heads/master | /webpage/easyRL_app/utilities.py | import boto3
import botocore
import uuid
from easyRL_app import apps
import os
import json
import core
def get_aws_s3(aws_access_key_id, aws_secret_access_key, aws_session_token=None):
    """Build a boto3 S3 client from the explicit credentials supplied."""
    credentials = {
        'aws_access_key_id': aws_access_key_id,
        'aws_secret_access_key': aws_secret_access_key,
        'aws_session_token': aws_session_token,
    }
    return boto3.client('s3', **credentials)
def get_aws_lambda(aws_access_key_id, aws_secret_access_key, aws_session_token=None, region_name='us-east-1'):
    """Build a boto3 Lambda client for *region_name* from explicit credentials."""
    credentials = {
        'region_name': region_name,
        'aws_access_key_id': aws_access_key_id,
        'aws_secret_access_key': aws_secret_access_key,
        'aws_session_token': aws_session_token,
    }
    return boto3.client('lambda', **credentials)
def is_valid_aws_credential(aws_access_key_id, aws_secret_access_key, aws_session_token=None):
    """Return True iff the credentials can call STS GetCallerIdentity."""
    try:
        sts = boto3.client('sts',
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_session_token=aws_session_token,
        )
        sts.get_caller_identity()
    except botocore.exceptions.ClientError:
        return False
    return True
def list_items_in_bucket(aws_access_key, aws_secret_key, bucket_name):
    """Return the object keys in *bucket_name*, or None on an S3 client error."""
    try:
        listing = get_aws_s3(aws_access_key, aws_secret_key).list_objects(Bucket=bucket_name)
        return [entry['Key'] for entry in listing['Contents']]
    except botocore.exceptions.ClientError:
        return None
def download_item_in_bucket(aws_access_key, aws_secret_key, bucket_name, bucket_filename, local_filename):
    """Download one S3 object to *local_filename*; True on success, False on client error."""
    try:
        get_aws_s3(aws_access_key, aws_secret_key).download_file(
            bucket_name, bucket_filename, local_filename)
    except botocore.exceptions.ClientError:
        return False
    return True
def get_recent_training_data(aws_access_key, aws_secret_key, bucket_name):
    """Fetch the latest episode metrics and render image for a training job.

    Downloads ``data.json`` from the job's S3 bucket (bucket name doubles as
    the job id), reads the stats of the most recent episode, then downloads
    that episode's rendered image.  Both temp files are deleted after reading.

    Returns a 5-tuple: (avgLoss, avgEpsilon, totalReward, avgReward,
    image_data) where image_data is the raw image bytes.
    NOTE(review): raises (KeyError/JSONDecodeError/IOError) if the downloads
    fail or data.json has no episodes yet -- callers must be prepared.
    """
    local_data_file = apps.LOCAL_JSON_FILE.format(bucket_name)
    download_item_in_bucket(aws_access_key, aws_secret_key, bucket_name, apps.DATA_JSON_FILE, local_data_file)
    # read the data.json local file the name is changed to JOB_ID.json in /tmp directory
    file_content = get_file_content_then_delete_file(local_data_file, 'r')
    # parse the JSON content to JSON object
    json_content = json.loads(file_content)
    # Stats of the most recently completed episode.
    last_episode = json_content['episodes'][-1]
    episodeNo = last_episode['episode']
    # store the values
    avgLoss = last_episode['avgLoss']
    avgEpsilon = last_episode['avgEpsilon']
    totalReward = last_episode['totalReward']
    avgReward = json_content['avgReward']
    # read the image data
    image_file = apps.IMAGE_FILE.format(episodeNo)
    image_local_file = "{}/static/{}-{}".format(str(core.settings.BASE_DIR), bucket_name, image_file)
    download_item_in_bucket(aws_access_key, aws_secret_key, bucket_name, image_file, image_local_file)
    image_data = get_file_content_then_delete_file(image_local_file, 'rb')
    return avgLoss, avgEpsilon, totalReward, avgReward, image_data
def invoke_aws_lambda_func(lambdas, data='{}'):
    """Synchronously invoke the 'cloudBridge' Lambda with *data* as its payload.

    :param lambdas: a boto3 Lambda client
    :param data: JSON-encoded request payload string
    :return: the raw invoke() response
    """
    invocation = {
        'FunctionName': 'cloudBridge',
        'InvocationType': 'RequestResponse',
        'Payload': data,
    }
    return lambdas.invoke(**invocation)
def get_file_content_then_delete_file(file_path, option):
    """Read the entire file at *file_path*, delete the file, return its content.

    :param file_path: path of the file to consume
    :param option: mode passed to open(), e.g. 'r' for text or 'rb' for binary
    :return: the file content (str or bytes, depending on *option*)
    :raises OSError: if the file cannot be opened or removed
    """
    # Context manager guarantees the handle is closed even if read() raises;
    # the previous open()/read()/close() sequence leaked the handle on error.
    with open(file_path, option) as file:
        file_content = file.read()
    os.remove(file_path)
    return file_content
def generate_jobID():
    """Return a fresh random job identifier (a UUID4 in canonical string form)."""
    job_uuid = uuid.uuid4()
    return "{}".format(job_uuid)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,605 | RobertCordingly/easyRL-v0 | refs/heads/master | /webpage/easyRL_app/views.py | from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from . import forms
import time
import json
import boto3
import os
from easyRL_app.utilities import get_aws_lambda,\
invoke_aws_lambda_func, is_valid_aws_credential, generate_jobID
from easyRL_app import apps
# Job id generated once per process, used by the debug helpers.
DEBUG_JOB_ID = generate_jobID()
# Module-level boto3 session shared by the views in this module.
session = boto3.session.Session()
# Create your views here.
def index(request):
    """Render the EasyRL dashboard page.

    Redirects to the login view unless the session holds validated AWS
    credentials.  On GET, renders a fresh hyperparameter form; on POST,
    re-renders with the bound form so validation errors are displayed.
    (Previously an invalid POST fell through every branch and the view
    returned None, which Django reports as a 500.)
    """
    # send the user back to the login form if the user did not sign in or session expired
    debug_sessions(request)
    if 'aws_succeed' not in request.session or not request.session['aws_succeed']:
        return HttpResponseRedirect("/easyRL_app/login/")
    index_dict = {}
    files = os.listdir(os.path.join(settings.BASE_DIR, "static/easyRL_app/images"))
    index_dict['files'] = files
    form = forms.HyperParameterFormDeepQ()
    info = lambda_info(request.session['aws_access_key'],
        request.session['aws_secret_key'],
        request.session['aws_security_token'],
        request.session['job_id'],{})
    index_dict['info'] = add_file_to_info(info, files)
    if request.method == "POST":
        # Bind the submitted hyperparameters; is_valid() populates
        # cleaned_data and errors so the template can show them.
        form = forms.HyperParameterFormDeepQ(request.POST)
        form.is_valid()
    index_dict['form'] = form
    return render(request, "easyRL_app/index.html", context=index_dict)
def login(request):
    """Authenticate the user with raw AWS credentials and start a session.

    GET renders the credential form.  POST validates the submitted keys
    against AWS STS; on success the credentials and a fresh job id are
    stored in the Django session and the user is redirected to the main
    page, otherwise back to the login page with aws_succeed=False.
    """
    form = forms.AwsCredentialForm()
    if request.method == "GET":
        return render(request, "easyRL_app/login.html", context={'form': form})
    elif request.method == "POST":
        form = forms.AwsCredentialForm(request.POST)
        # Proceed only if the form parses AND the credentials actually work.
        if form.is_valid() and is_valid_aws_credential(
            form.cleaned_data["aws_access_key"],
            form.cleaned_data["aws_secret_key"],
            form.cleaned_data["aws_security_token"]):
            request.session['aws_access_key'] = form.cleaned_data["aws_access_key"]
            request.session['aws_secret_key'] = form.cleaned_data["aws_secret_key"]
            request.session['aws_security_token'] = form.cleaned_data["aws_security_token"]
            request.session['job_id'] = generate_jobID()
            # create ec2 instance
            debug_sessions(request)
            #lambda_create_instance(
            #    request.session['aws_access_key'],
            #    request.session['aws_secret_key'],
            #    request.session['aws_security_token'],
            #    request.session['job_id'],
            #    {}
            #)
            request.session['aws_succeed'] = True
            return HttpResponseRedirect("/easyRL_app/")
        else:
            request.session['aws_succeed'] = False
            return HttpResponseRedirect("/easyRL_app/login/")
def logout(request):
    """Log the user out.

    Terminates the user's EC2 instance via the Lambda backend (passing the
    full hyper-parameter set read from POST, with defaults), clears every
    session key, and redirects to the login page.

    NOTE(review): assumes the AWS-credential session keys exist — presumably
    only reachable after a successful login; confirm against URL wiring.
    """
    # store the keys (to avoid deep copy)
    keys = [key for key in request.session.keys()]
    # terminate the instance for the user
    lambda_terminate_instance(
        request.session['aws_access_key'],
        request.session['aws_secret_key'],
        request.session['aws_security_token'],
        request.session['job_id'],
        # every value is pulled from POST via get_safe_value so missing or
        # malformed fields silently fall back to the defaults shown here
        {
        "instanceType": get_safe_value(str, request.POST.get("instanceType"), "c4.xlarge")
        ,"instanceID": get_safe_value(str, request.POST.get("instanceID"), "")
        ,"killTime": get_safe_value(int, request.POST.get("killTime"), 600)
        ,"environment": get_safe_value(int, request.POST.get("environment"), 1)
        ,"continuousTraining" : get_safe_value(str, request.POST.get("continuousTraining"), "False")
        ,"agent": get_safe_value(int, request.POST.get("agent"), 1)
        ,"episodes": get_safe_value(int, request.POST.get("episodes"), 20)
        ,"steps": get_safe_value(int, request.POST.get("steps"), 50)
        ,"gamma": get_safe_value(float, request.POST.get("gamma"), 0.97)
        ,"minEpsilon": get_safe_value(float, request.POST.get("minEpsilon"), 0.01)
        ,"maxEpsilon": get_safe_value(float, request.POST.get("maxEpsilon"), 0.99)
        ,"decayRate": get_safe_value(float, request.POST.get("decayRate"), 0.01)
        ,"batchSize": get_safe_value(int, request.POST.get("batchSize"), 32)
        ,"memorySize": get_safe_value(int, request.POST.get("memorySize"), 1000)
        ,"targetInterval": get_safe_value(int, request.POST.get("targetInterval"), 10)
        ,"alpha": get_safe_value(float, request.POST.get("alpha"), 0.9)
        ,"historyLength": get_safe_value(int, request.POST.get("historyLength"), 10)
        ,"delta": get_safe_value(int, request.POST.get("delta"), 0.001)
        ,"sigma": get_safe_value(int, request.POST.get("sigma"), 0.5)
        ,"population": get_safe_value(int, request.POST.get("population"), 10)
        ,"elite": get_safe_value(int, request.POST.get("elite"), 0.2)
        ,"tau": get_safe_value(int, request.POST.get("tau"), 0.97)
        ,"temperature": get_safe_value(int, request.POST.get("temperature"), 0.97)
        ,"learningRate": get_safe_value(int, request.POST.get("learningRate"), 0.001)
        ,"policyLearnRate": get_safe_value(int, request.POST.get("policyLearnRate"), 0.001)
        ,"valueLearnRate": get_safe_value(int, request.POST.get("valueLearnRate"), 0.001)
        ,"horizon": get_safe_value(int, request.POST.get("horizon"), 50)
        ,"epochSize": get_safe_value(int, request.POST.get("epochSize"), 500)
        ,"ppoEpsilon": get_safe_value(int, request.POST.get("ppoEpsilon"), 0.2)
        ,"ppoLambda": get_safe_value(int, request.POST.get("ppoLambda"), 0.95)
        ,"valueLearnRatePlus": get_safe_value(int, request.POST.get("valueLearnRatePlus"), 0.001)
        }
    )
    # clear up all sessions
    for key in keys:
        del request.session[key]
    return HttpResponseRedirect("/easyRL_app/login/")
@csrf_exempt
def train(request):
    """Start a training run on the backend via the run-job Lambda task.

    Requires an authenticated session; forwards the hyper-parameters from
    POST (with safe defaults) and returns the Lambda's reply as the
    response body.

    Fix: the instanceType entry previously looked up the POST key
    "c4.xlarge" instead of "instanceType", so a user-selected instance
    type was always ignored in favor of the default.
    """
    debug_sessions(request)
    if 'aws_succeed' not in request.session or not request.session['aws_succeed']:
        return HttpResponse(apps.ERROR_UNAUTHENTICATED)
    print("{}request_parameters{}={}".format(apps.FORMAT_BLUE, apps.FORMAT_RESET, debug_parameters(request)))
    return HttpResponse(lambda_run_job(
        request.session['aws_access_key'],
        request.session['aws_secret_key'],
        request.session['aws_security_token'],
        request.session['job_id'],
        {
        "instanceType": get_safe_value(str, request.POST.get("instanceType"), "c4.xlarge")
        ,"instanceID": get_safe_value(str, request.POST.get("instanceID"), "")
        ,"killTime": get_safe_value(int, request.POST.get("killTime"), 600)
        ,"environment": get_safe_value(int, request.POST.get("environment"), 1)
        ,"continuousTraining" : get_safe_value(str, request.POST.get("continuousTraining"), "False")
        ,"agent": get_safe_value(int, request.POST.get("agent"), 1)
        ,"episodes": get_safe_value(int, request.POST.get("episodes"), 20)
        ,"steps": get_safe_value(int, request.POST.get("steps"), 50)
        ,"gamma": get_safe_value(float, request.POST.get("gamma"), 0.97)
        ,"minEpsilon": get_safe_value(float, request.POST.get("minEpsilon"), 0.01)
        ,"maxEpsilon": get_safe_value(float, request.POST.get("maxEpsilon"), 0.99)
        ,"decayRate": get_safe_value(float, request.POST.get("decayRate"), 0.01)
        ,"batchSize": get_safe_value(int, request.POST.get("batchSize"), 32)
        ,"memorySize": get_safe_value(int, request.POST.get("memorySize"), 1000)
        ,"targetInterval": get_safe_value(int, request.POST.get("targetInterval"), 10)
        ,"alpha": get_safe_value(float, request.POST.get("alpha"), 0.9)
        ,"historyLength": get_safe_value(int, request.POST.get("historyLength"), 10)
        ,"delta": get_safe_value(int, request.POST.get("delta"), 0.001)
        ,"sigma": get_safe_value(int, request.POST.get("sigma"), 0.5)
        ,"population": get_safe_value(int, request.POST.get("population"), 10)
        ,"elite": get_safe_value(int, request.POST.get("elite"), 0.2)
        ,"tau": get_safe_value(int, request.POST.get("tau"), 0.97)
        ,"temperature": get_safe_value(int, request.POST.get("temperature"), 0.97)
        ,"learningRate": get_safe_value(int, request.POST.get("learningRate"), 0.001)
        ,"policyLearnRate": get_safe_value(int, request.POST.get("policyLearnRate"), 0.001)
        ,"valueLearnRate": get_safe_value(int, request.POST.get("valueLearnRate"), 0.001)
        ,"horizon": get_safe_value(int, request.POST.get("horizon"), 50)
        ,"epochSize": get_safe_value(int, request.POST.get("epochSize"), 500)
        ,"ppoEpsilon": get_safe_value(int, request.POST.get("ppoEpsilon"), 0.2)
        ,"ppoLambda": get_safe_value(int, request.POST.get("ppoLambda"), 0.95)
        ,"valueLearnRatePlus": get_safe_value(int, request.POST.get("valueLearnRatePlus"), 0.001)
        }
    ))
@csrf_exempt
def test(request):
    """Run a test (evaluation) episode set on the backend via the run-test
    Lambda task.

    Requires an authenticated session; forwards the hyper-parameters from
    POST (with safe defaults) and returns the Lambda's reply as the
    response body. Note: this is a Django view, not a unit test, despite
    the name.
    """
    debug_sessions(request)
    if 'aws_succeed' not in request.session or not request.session['aws_succeed']:
        return HttpResponse(apps.ERROR_UNAUTHENTICATED)
    print("{}request_parameters{}={}".format(apps.FORMAT_BLUE, apps.FORMAT_RESET, debug_parameters(request)))
    return HttpResponse(lambda_test_job(
        request.session['aws_access_key'],
        request.session['aws_secret_key'],
        request.session['aws_security_token'],
        request.session['job_id'],
        # hyper-parameters: missing/malformed POST fields fall back to defaults
        {
        "instanceType": get_safe_value(str, request.POST.get("instanceType"), "c4.xlarge")
        ,"instanceID": get_safe_value(str, request.POST.get("instanceID"), "")
        ,"killTime": get_safe_value(int, request.POST.get("killTime"), 600)
        ,"environment": get_safe_value(int, request.POST.get("environment"), 1)
        ,"continuousTraining" : get_safe_value(str, request.POST.get("continuousTraining"), "False")
        ,"agent": get_safe_value(int, request.POST.get("agent"), 1)
        ,"episodes": get_safe_value(int, request.POST.get("episodes"), 20)
        ,"steps": get_safe_value(int, request.POST.get("steps"), 50)
        ,"gamma": get_safe_value(float, request.POST.get("gamma"), 0.97)
        ,"minEpsilon": get_safe_value(float, request.POST.get("minEpsilon"), 0.01)
        ,"maxEpsilon": get_safe_value(float, request.POST.get("maxEpsilon"), 0.99)
        ,"decayRate": get_safe_value(float, request.POST.get("decayRate"), 0.01)
        ,"batchSize": get_safe_value(int, request.POST.get("batchSize"), 32)
        ,"memorySize": get_safe_value(int, request.POST.get("memorySize"), 1000)
        ,"targetInterval": get_safe_value(int, request.POST.get("targetInterval"), 10)
        ,"alpha": get_safe_value(float, request.POST.get("alpha"), 0.9)
        ,"historyLength": get_safe_value(int, request.POST.get("historyLength"), 10)
        ,"delta": get_safe_value(int, request.POST.get("delta"), 0.001)
        ,"sigma": get_safe_value(int, request.POST.get("sigma"), 0.5)
        ,"population": get_safe_value(int, request.POST.get("population"), 10)
        ,"elite": get_safe_value(int, request.POST.get("elite"), 0.2)
        ,"tau": get_safe_value(int, request.POST.get("tau"), 0.97)
        ,"temperature": get_safe_value(int, request.POST.get("temperature"), 0.97)
        ,"learningRate": get_safe_value(int, request.POST.get("learningRate"), 0.001)
        ,"policyLearnRate": get_safe_value(int, request.POST.get("policyLearnRate"), 0.001)
        ,"valueLearnRate": get_safe_value(int, request.POST.get("valueLearnRate"), 0.001)
        ,"horizon": get_safe_value(int, request.POST.get("horizon"), 50)
        ,"epochSize": get_safe_value(int, request.POST.get("epochSize"), 500)
        ,"ppoEpsilon": get_safe_value(int, request.POST.get("ppoEpsilon"), 0.2)
        ,"ppoLambda": get_safe_value(int, request.POST.get("ppoLambda"), 0.95)
        ,"valueLearnRatePlus": get_safe_value(int, request.POST.get("valueLearnRatePlus"), 0.001)
        }
    ))
@csrf_exempt
def poll(request):
    """Poll the backend for the current job's state via the poll Lambda task.

    Returns the Lambda's JSON reply as the response body. If anything goes
    wrong a placeholder "booting" payload is returned so the front-end keeps
    showing its loading state.

    Fixes: the original used a bare ``except:`` (which also swallowed
    SystemExit/KeyboardInterrupt) and returned a plain dict from the error
    path — not an HttpResponse — which itself crashes the view. The fallback
    now returns the same payload serialized as an HttpResponse, and only
    ``Exception`` is caught.
    """
    try:
        debug_sessions(request)
        if 'aws_succeed' not in request.session or not request.session['aws_succeed']:
            return HttpResponse(apps.ERROR_UNAUTHENTICATED)
        print("{}request_parameters{}={}".format(apps.FORMAT_BLUE, apps.FORMAT_RESET, debug_parameters(request)))
        response = HttpResponse(lambda_poll(
            request.session['aws_access_key'],
            request.session['aws_secret_key'],
            request.session['aws_security_token'],
            request.session['job_id'],
            {
            "instanceType": get_safe_value(str, request.POST.get("instanceType"), "c4.xlarge")
            ,"instanceID": get_safe_value(str, request.POST.get("instanceID"), "")
            ,"killTime": get_safe_value(int, request.POST.get("killTime"), 600)
            ,"continuousTraining" : get_safe_value(int, request.POST.get("continuousTraining"), 0)
            ,"environment": get_safe_value(int, request.POST.get("environment"), 1)
            ,"agent": get_safe_value(int, request.POST.get("agent"), 1)
            ,"episodes": get_safe_value(int, request.POST.get("episodes"), 20)
            ,"steps": get_safe_value(int, request.POST.get("steps"), 50)
            ,"gamma": get_safe_value(float, request.POST.get("gamma"), 0.97)
            ,"minEpsilon": get_safe_value(float, request.POST.get("minEpsilon"), 0.01)
            ,"maxEpsilon": get_safe_value(float, request.POST.get("maxEpsilon"), 0.99)
            ,"decayRate": get_safe_value(float, request.POST.get("decayRate"), 0.01)
            ,"batchSize": get_safe_value(int, request.POST.get("batchSize"), 32)
            ,"memorySize": get_safe_value(int, request.POST.get("memorySize"), 1000)
            ,"targetInterval": get_safe_value(int, request.POST.get("targetInterval"), 10)
            ,"alpha": get_safe_value(float, request.POST.get("alpha"), 0.9)
            ,"historyLength": get_safe_value(int, request.POST.get("historyLength"), 10)
            ,"delta": get_safe_value(int, request.POST.get("delta"), 0.001)
            ,"sigma": get_safe_value(int, request.POST.get("sigma"), 0.5)
            ,"population": get_safe_value(int, request.POST.get("population"), 10)
            ,"elite": get_safe_value(int, request.POST.get("elite"), 0.2)
            ,"tau": get_safe_value(int, request.POST.get("tau"), 0.97)
            ,"temperature": get_safe_value(int, request.POST.get("temperature"), 0.97)
            ,"learningRate": get_safe_value(int, request.POST.get("learningRate"), 0.001)
            ,"policyLearnRate": get_safe_value(int, request.POST.get("policyLearnRate"), 0.001)
            ,"valueLearnRate": get_safe_value(int, request.POST.get("valueLearnRate"), 0.001)
            ,"horizon": get_safe_value(int, request.POST.get("horizon"), 50)
            ,"epochSize": get_safe_value(int, request.POST.get("epochSize"), 500)
            ,"ppoEpsilon": get_safe_value(int, request.POST.get("ppoEpsilon"), 0.2)
            ,"ppoLambda": get_safe_value(int, request.POST.get("ppoLambda"), 0.95)
            ,"valueLearnRatePlus": get_safe_value(int, request.POST.get("valueLearnRatePlus"), 0.001)
            }
        ))
        return response
    except Exception:
        # same placeholder payload as before, but as a proper HttpResponse
        return HttpResponse('{"instanceState": "booting", "instanceStateText": "Loading..."}')
@csrf_exempt
def info(request):
    """Return the info-task payload (environments/agents metadata) for the
    current job, or an unauthenticated error body."""
    debug_sessions(request)
    if not request.session.get('aws_succeed'):
        return HttpResponse(apps.ERROR_UNAUTHENTICATED)
    print(f"{apps.FORMAT_BLUE}request_parameters{apps.FORMAT_RESET}={debug_parameters(request)}")
    session = request.session
    payload = lambda_info(session['aws_access_key'],
                          session['aws_secret_key'],
                          session['aws_security_token'],
                          session['job_id'],
                          {})
    return HttpResponse(payload)
@csrf_exempt
def import_model_lambda(request):
    """Kick off the import Lambda task and send the user back to the dashboard."""
    debug_sessions(request)
    if not request.session.get('aws_succeed'):
        return HttpResponse(apps.ERROR_UNAUTHENTICATED)
    print(f"{apps.FORMAT_BLUE}request_parameters{apps.FORMAT_RESET}={debug_parameters(request)}")
    session = request.session
    import_result = lambda_import(session['aws_access_key'],
                                  session['aws_secret_key'],
                                  session['aws_security_token'],
                                  session['job_id'],
                                  {})
    # NOTE(review): the import result is passed as the redirect's body, which
    # browsers ignore on a 302 — confirm this is intentional.
    return HttpResponseRedirect('/easyRL_app/', import_result)
@csrf_exempt
def export_model(request):
    """Ask the backend to export the trained model via the export-model
    Lambda task and return the Lambda's reply as the response body.

    Requires an authenticated session; hyper-parameters are read from POST
    with safe defaults.
    """
    debug_sessions(request)
    if 'aws_succeed' not in request.session or not request.session['aws_succeed']:
        return HttpResponse(apps.ERROR_UNAUTHENTICATED)
    print("{}request_parameters{}={}".format(apps.FORMAT_BLUE, apps.FORMAT_RESET, debug_parameters(request)))
    return HttpResponse(lambda_export_model(
        request.session['aws_access_key'],
        request.session['aws_secret_key'],
        request.session['aws_security_token'],
        request.session['job_id'],
        # hyper-parameters: missing/malformed POST fields fall back to defaults
        {
        "instanceType": get_safe_value(str, request.POST.get("instanceType"), "c4.xlarge")
        ,"instanceID": get_safe_value(str, request.POST.get("instanceID"), "")
        ,"killTime": get_safe_value(int, request.POST.get("killTime"), 600)
        ,"environment": get_safe_value(int, request.POST.get("environment"), 1)
        ,"continuousTraining" : get_safe_value(str, request.POST.get("continuousTraining"), "False")
        ,"agent": get_safe_value(int, request.POST.get("agent"), 1)
        ,"episodes": get_safe_value(int, request.POST.get("episodes"), 20)
        ,"steps": get_safe_value(int, request.POST.get("steps"), 50)
        ,"gamma": get_safe_value(float, request.POST.get("gamma"), 0.97)
        ,"minEpsilon": get_safe_value(float, request.POST.get("minEpsilon"), 0.01)
        ,"maxEpsilon": get_safe_value(float, request.POST.get("maxEpsilon"), 0.99)
        ,"decayRate": get_safe_value(float, request.POST.get("decayRate"), 0.01)
        ,"batchSize": get_safe_value(int, request.POST.get("batchSize"), 32)
        ,"memorySize": get_safe_value(int, request.POST.get("memorySize"), 1000)
        ,"targetInterval": get_safe_value(int, request.POST.get("targetInterval"), 10)
        ,"alpha": get_safe_value(float, request.POST.get("alpha"), 0.9)
        ,"historyLength": get_safe_value(int, request.POST.get("historyLength"), 10)
        ,"delta": get_safe_value(int, request.POST.get("delta"), 0.001)
        ,"sigma": get_safe_value(int, request.POST.get("sigma"), 0.5)
        ,"population": get_safe_value(int, request.POST.get("population"), 10)
        ,"elite": get_safe_value(int, request.POST.get("elite"), 0.2)
        ,"tau": get_safe_value(int, request.POST.get("tau"), 0.97)
        ,"temperature": get_safe_value(int, request.POST.get("temperature"), 0.97)
        ,"learningRate": get_safe_value(int, request.POST.get("learningRate"), 0.001)
        ,"policyLearnRate": get_safe_value(int, request.POST.get("policyLearnRate"), 0.001)
        ,"valueLearnRate": get_safe_value(int, request.POST.get("valueLearnRate"), 0.001)
        ,"horizon": get_safe_value(int, request.POST.get("horizon"), 50)
        ,"epochSize": get_safe_value(int, request.POST.get("epochSize"), 500)
        ,"ppoEpsilon": get_safe_value(int, request.POST.get("ppoEpsilon"), 0.2)
        ,"ppoLambda": get_safe_value(int, request.POST.get("ppoLambda"), 0.95)
        ,"valueLearnRatePlus": get_safe_value(int, request.POST.get("valueLearnRatePlus"), 0.001)
        }
    ))
@csrf_exempt
def halt(request):
    """Ask the backend to halt the running job via the halt-job Lambda task
    and return the Lambda's reply as the response body.

    Requires an authenticated session; hyper-parameters are read from POST
    with safe defaults.
    """
    debug_sessions(request)
    if 'aws_succeed' not in request.session or not request.session['aws_succeed']:
        return HttpResponse(apps.ERROR_UNAUTHENTICATED)
    print("{}request_parameters{}={}".format(apps.FORMAT_BLUE, apps.FORMAT_RESET, debug_parameters(request)))
    return HttpResponse(lambda_halt_job(
        request.session['aws_access_key'],
        request.session['aws_secret_key'],
        request.session['aws_security_token'],
        request.session['job_id'],
        # hyper-parameters: missing/malformed POST fields fall back to defaults
        {
        "instanceType": get_safe_value(str, request.POST.get("instanceType"), "c4.xlarge")
        ,"instanceID": get_safe_value(str, request.POST.get("instanceID"), "")
        ,"killTime": get_safe_value(int, request.POST.get("killTime"), 600)
        ,"environment": get_safe_value(int, request.POST.get("environment"), 1)
        ,"continuousTraining" : get_safe_value(str, request.POST.get("continuousTraining"), "False")
        ,"agent": get_safe_value(int, request.POST.get("agent"), 1)
        ,"episodes": get_safe_value(int, request.POST.get("episodes"), 20)
        ,"steps": get_safe_value(int, request.POST.get("steps"), 50)
        ,"gamma": get_safe_value(float, request.POST.get("gamma"), 0.97)
        ,"minEpsilon": get_safe_value(float, request.POST.get("minEpsilon"), 0.01)
        ,"maxEpsilon": get_safe_value(float, request.POST.get("maxEpsilon"), 0.99)
        ,"decayRate": get_safe_value(float, request.POST.get("decayRate"), 0.01)
        ,"batchSize": get_safe_value(int, request.POST.get("batchSize"), 32)
        ,"memorySize": get_safe_value(int, request.POST.get("memorySize"), 1000)
        ,"targetInterval": get_safe_value(int, request.POST.get("targetInterval"), 10)
        ,"alpha": get_safe_value(float, request.POST.get("alpha"), 0.9)
        ,"historyLength": get_safe_value(int, request.POST.get("historyLength"), 10)
        ,"delta": get_safe_value(int, request.POST.get("delta"), 0.001)
        ,"sigma": get_safe_value(int, request.POST.get("sigma"), 0.5)
        ,"population": get_safe_value(int, request.POST.get("population"), 10)
        ,"elite": get_safe_value(int, request.POST.get("elite"), 0.2)
        ,"tau": get_safe_value(int, request.POST.get("tau"), 0.97)
        ,"temperature": get_safe_value(int, request.POST.get("temperature"), 0.97)
        ,"learningRate": get_safe_value(int, request.POST.get("learningRate"), 0.001)
        ,"policyLearnRate": get_safe_value(int, request.POST.get("policyLearnRate"), 0.001)
        ,"valueLearnRate": get_safe_value(int, request.POST.get("valueLearnRate"), 0.001)
        ,"horizon": get_safe_value(int, request.POST.get("horizon"), 50)
        ,"epochSize": get_safe_value(int, request.POST.get("epochSize"), 500)
        ,"ppoEpsilon": get_safe_value(int, request.POST.get("ppoEpsilon"), 0.2)
        ,"ppoLambda": get_safe_value(int, request.POST.get("ppoLambda"), 0.95)
        ,"valueLearnRatePlus": get_safe_value(int, request.POST.get("valueLearnRatePlus"), 0.001)
        }
    ))
# Disabled implementation kept for reference: eager EC2 instance creation is
# currently turned off (the corresponding call in login() is also commented out).
'''
def lambda_create_instance(aws_access_key, aws_secret_key, aws_security_token, job_id, arguments):
    lambdas = get_aws_lambda(os.getenv("AWS_ACCESS_KEY_ID"), os.getenv("AWS_SECRET_ACCESS_KEY"))
    data = {
        "accessKey": aws_access_key,
        "secretKey": aws_secret_key,
        "sessionToken": aws_security_token,
        "jobID": job_id,
        "task": apps.TASK_CREATE_INSTANCE,
        "arguments": arguments,
    }
    response = invoke_aws_lambda_func(lambdas, str(data).replace('\'','"'))
    print("{}lambda_create_instance{}={}".format(apps.FORMAT_RED, apps.FORMAT_RESET, response['Payload'].read()))
    if response['StatusCode'] == 200:
        streambody = response['Payload'].read().decode()
        print("{}stream_body{}={}".format(apps.FORMAT_BLUE, apps.FORMAT_RESET, streambody))
        return True
    return False
'''
def lambda_import(aws_access_key, aws_secret_key, aws_security_token, job_id, arguments):
    """Invoke the import task on the backend Lambda.

    Returns True when the invocation reports HTTP 200, else False.

    Fixes: the payload stream is now read exactly once — a Lambda response
    body is a stream that is exhausted after the first read(), so the
    original's second read() always decoded an empty string — and the log
    label is corrected (it previously said "lambda_terminate_instance").
    """
    lambdas = get_aws_lambda(os.getenv("AWS_ACCESS_KEY_ID"), os.getenv("AWS_SECRET_ACCESS_KEY"))
    data = {
        "accessKey": aws_access_key,
        "secretKey": aws_secret_key,
        "sessionToken": aws_security_token,
        "jobID": job_id,
        "task": apps.TASK_IMPORT,
        "arguments": arguments,
    }
    response = invoke_aws_lambda_func(lambdas, str(data).replace('\'','"'))
    streambody = response['Payload'].read().decode()
    print("{}lambda_import{}={}".format(apps.FORMAT_RED, apps.FORMAT_RESET, streambody))
    if response['StatusCode'] == 200:
        print("{}stream_body{}={}".format(apps.FORMAT_BLUE, apps.FORMAT_RESET, streambody))
        return True
    return False
def lambda_terminate_instance(aws_access_key, aws_secret_key, aws_security_token, job_id, arguments):
    """Invoke the terminate-instance task on the backend Lambda.

    Returns True when the invocation reports HTTP 200, else False.

    Fix: the payload stream is read exactly once — a Lambda response body is
    a stream that is exhausted after the first read(), so the original's
    second read() always decoded an empty string.

    See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html
    """
    lambdas = get_aws_lambda(os.getenv("AWS_ACCESS_KEY_ID"), os.getenv("AWS_SECRET_ACCESS_KEY"))
    data = {
        "accessKey": aws_access_key,
        "secretKey": aws_secret_key,
        "sessionToken": aws_security_token,
        "jobID": job_id,
        "task": apps.TASK_TERMINAL_INSTANCE,
        "arguments": arguments,
    }
    response = invoke_aws_lambda_func(lambdas, str(data).replace('\'','"'))
    streambody = response['Payload'].read().decode()
    print("{}lambda_terminate_instance{}={}".format(apps.FORMAT_RED, apps.FORMAT_RESET, streambody))
    if response['StatusCode'] == 200:
        print("{}stream_body{}={}".format(apps.FORMAT_BLUE, apps.FORMAT_RESET, streambody))
        return True
    return False
def lambda_halt_job(aws_access_key, aws_secret_key, aws_security_token, job_id, arguments):
    """Invoke the halt-job task on the backend Lambda and return its reply
    as a string (empty when no payload came back)."""
    lambdas = get_aws_lambda(os.getenv("AWS_ACCESS_KEY_ID"), os.getenv("AWS_SECRET_ACCESS_KEY"))
    request_data = {
        "accessKey": aws_access_key,
        "secretKey": aws_secret_key,
        "sessionToken": aws_security_token,
        "jobID": job_id,
        "task": apps.TASK_HALT_JOB,
        "arguments": arguments
    }
    response = invoke_aws_lambda_func(lambdas, str(request_data).replace("'", '"'))
    payload = response['Payload'].read()
    print(f"{apps.FORMAT_RED}lambda_halt_job{apps.FORMAT_RESET}={payload}")
    # strip the b'...' wrapper of the bytes repr
    return str(payload)[2:-1] if payload else ""
def lambda_export_model(aws_access_key, aws_secret_key, aws_security_token, job_id, arguments):
    """Invoke the export-model task on the backend Lambda and return its
    reply as a string (empty when no payload came back)."""
    lambdas = get_aws_lambda(os.getenv("AWS_ACCESS_KEY_ID"), os.getenv("AWS_SECRET_ACCESS_KEY"))
    request_data = {
        "accessKey": aws_access_key,
        "secretKey": aws_secret_key,
        "sessionToken": aws_security_token,
        "jobID": job_id,
        "task": apps.TASK_EXPORT_MODEL,
        "arguments": arguments
    }
    response = invoke_aws_lambda_func(lambdas, str(request_data).replace("'", '"'))
    payload = response['Payload'].read()
    print(f"{apps.FORMAT_RED}lambda_export_model{apps.FORMAT_RESET}={payload}")
    # strip the b'...' wrapper of the bytes repr
    return str(payload)[2:-1] if payload else ""
def lambda_poll(aws_access_key, aws_secret_key, aws_security_token, job_id, arguments):
    """Invoke the poll task on the backend Lambda and return its reply as a
    string (empty when no payload came back)."""
    lambdas = get_aws_lambda(os.getenv("AWS_ACCESS_KEY_ID"), os.getenv("AWS_SECRET_ACCESS_KEY"))
    request_data = {
        "accessKey": aws_access_key,
        "secretKey": aws_secret_key,
        "sessionToken": aws_security_token,
        "jobID": job_id,
        "task": apps.TASK_POLL,
        "arguments": arguments,
    }
    response = invoke_aws_lambda_func(lambdas, str(request_data).replace("'", '"'))
    payload = response['Payload'].read()
    print(f"{apps.FORMAT_RED}lambda_poll{apps.FORMAT_RESET}={payload}")
    # strip the b'...' wrapper of the bytes repr
    return str(payload)[2:-1] if payload else ""
def lambda_run_job(aws_access_key, aws_secret_key, aws_security_token, job_id, arguments):
    """Invoke the run-job (training) task on the backend Lambda and return
    its reply as a string (empty when no payload came back)."""
    lambdas = get_aws_lambda(os.getenv("AWS_ACCESS_KEY_ID"), os.getenv("AWS_SECRET_ACCESS_KEY"))
    request_data = {
        "accessKey": aws_access_key,
        "secretKey": aws_secret_key,
        "sessionToken": aws_security_token,
        "jobID": job_id,
        "task": apps.TASK_RUN_JOB,
        "arguments": arguments,
    }
    response = invoke_aws_lambda_func(lambdas, str(request_data).replace("'", '"'))
    payload = response['Payload'].read()
    print(f"{apps.FORMAT_RED}lambda_run_job{apps.FORMAT_RESET}={payload}")
    # strip the b'...' wrapper of the bytes repr
    return str(payload)[2:-1] if payload else ""
def lambda_test_job(aws_access_key, aws_secret_key, aws_security_token, job_id, arguments):
    """Invoke the run-test (evaluation) task on the backend Lambda and
    return its reply as a string (empty when no payload came back)."""
    lambdas = get_aws_lambda(os.getenv("AWS_ACCESS_KEY_ID"), os.getenv("AWS_SECRET_ACCESS_KEY"))
    request_data = {
        "accessKey": aws_access_key,
        "secretKey": aws_secret_key,
        "sessionToken": aws_security_token,
        "jobID": job_id,
        "task": apps.TASK_RUN_TEST,
        "arguments": arguments,
    }
    response = invoke_aws_lambda_func(lambdas, str(request_data).replace("'", '"'))
    payload = response['Payload'].read()
    print(f"{apps.FORMAT_RED}lambda_test_job{apps.FORMAT_RESET}={payload}")
    # strip the b'...' wrapper of the bytes repr
    return str(payload)[2:-1] if payload else ""
def get_safe_value_bool(boolean_val):
    """Return True only for the exact string 'True'.

    Form/POST values arrive as strings, so this is a strict string
    comparison: 'true', True (the bool), None, etc. all yield False,
    exactly as before.
    """
    return boolean_val == 'True'
def get_safe_value(convert_function, input_value, default_value):
    """Convert ``input_value`` with ``convert_function``; on any failure
    return ``default_value``.

    The broad ``except Exception`` is deliberate: converters raise
    ValueError for malformed strings and TypeError for None (a missing POST
    field), and the request handlers rely on silently falling back to the
    default in every such case. The original's separate ValueError handler
    was redundant with the Exception one and has been merged.
    """
    try:
        return convert_function(input_value)
    except Exception:
        return default_value
def debug_parameters(request):
    """Render the request's POST parameters as one space-separated
    'key=value' string for logging."""
    pairs = ("{}={}".format(key, value) for key, value in request.POST.items())
    return " ".join(pairs)
def debug_sessions(request):
    """Print every session key/value (key highlighted in cyan) for debugging."""
    for key, value in request.session.items():
        print(f"{apps.FORMAT_CYAN}{key}{apps.FORMAT_RESET}={value}")
def lambda_info(aws_access_key, aws_secret_key, aws_security_token, job_id, arguments):
    """Invoke the info task on the backend Lambda and return its reply as a
    string (empty when no payload came back)."""
    lambdas = get_aws_lambda(os.getenv("AWS_ACCESS_KEY_ID"), os.getenv("AWS_SECRET_ACCESS_KEY"))
    request_data = {
        "accessKey": aws_access_key,
        "secretKey": aws_secret_key,
        "sessionToken": aws_security_token,
        "jobID": job_id,
        "task": apps.TASK_INFO,
        "arguments": arguments,
    }
    response = invoke_aws_lambda_func(lambdas, str(request_data).replace("'", '"'))
    payload = response['Payload'].read()
    print(f"{apps.FORMAT_GREEN}lambda_info_job{apps.FORMAT_RESET}={payload}")
    # strip the b'...' wrapper of the bytes repr
    return str(payload)[2:-1] if payload else ""
def add_file_to_info(payload, files):
    """Parse the info-Lambda JSON payload and attach a preview-image
    filename to each environment entry.

    'Cart Pole' is special-cased to a fixed image. Every other environment
    is matched by normalizing its name (dropping '.' and spaces,
    lowercasing) and looking for it as a substring of a similarly
    normalized filename; the first match wins. Environments without a
    match are left without a 'file' key.

    Fix: the original only handled the Cart Pole special case inside the
    file loop (re-assigning once per file, and never at all when ``files``
    was empty); the special case is now hoisted out of the loop.
    """
    result = json.loads(payload)
    for env in result['environments']:
        if env['name'] == 'Cart Pole':
            env['file'] = 'Cart Pole.jpg'
            continue
        normalized_name = env['name'].replace('.', '').replace(' ', '').lower()
        for file in files:
            if normalized_name in file.replace('_', '').replace(' ', '').lower():
                env['file'] = file
                break
    return result
from django.views.generic.edit import CreateView
from django.urls import reverse_lazy
from .models import Document
class import_model(CreateView):
    """Generic CreateView backed by the Document model: renders a single
    file-upload field and redirects to the 'upload' URL name on success."""
    model = Document
    fields = ['upload', ]
    success_url = reverse_lazy('upload')
from django.views import View
from storages.backends.s3boto3 import S3Boto3Storage
class file_upload(View):
    """Receive a model file from the user, store it as ``model.bin`` in the
    job's S3 bucket, then trigger the import Lambda task."""

    def post(self, request, **kwargs):
        debug_sessions(request)
        if not request.session.get('aws_succeed'):
            return HttpResponseRedirect("/easyRL_app/login/")
        uploaded = request.FILES.get('upload', 'EMPTY')
        session = request.session
        access_key = session['aws_access_key']
        secret_key = session['aws_secret_key']
        security_token = session['aws_security_token']
        job_id = session['job_id']
        # one bucket per job (optionally suffixed by a 'session' POST field)
        bucket = "easyrl-{}{}".format(job_id, request.POST.get('session', '0'))
        storage = S3Boto3Storage()
        storage.location = ''
        storage.file_overwrite = True
        storage.access_key = access_key
        storage.secret_key = secret_key
        storage.bucket_name = bucket
        # always stored under a fixed name so the import task can find it
        destination = os.path.join(storage.location, 'model.bin')
        storage.save(destination, uploaded)
        return HttpResponse(
            lambda_import(access_key, secret_key, security_token, job_id, {}),
            status=200)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,606 | RobertCordingly/easyRL-v0 | refs/heads/master | /MVC/helptext.py | Header = "Welcome to Portal RL - A user friendly environment for \n" \
"learning and deploying reinforcement learning environments and agents\n\n\n"
# Help text: the "Main work area" / getting-started section of the in-app
# help dialog. Fix: user-facing typo "affect" -> "effect".
Section1 = " MAIN WORK AREA (Tab X) \n\n" \
" - Getting Started: \n\n" \
" 1) Number of episodes -\n" \
" This parameter is used to define how many times the program should run the same \n" \
" simulation. Depending on the complexity of the model, a user might choose to run the simulation\n" \
" thousands of times to learn an optimal policy for the task it is trying to perform.\n" \
" NEW USER TIP:\n" \
" We recommend starting with a low number of episodes and gradually increasing the amount\n" \
" This will give you a sense of how reinforcement learning works and the effect\n" \
" the number of episodes has on various agent/environment interactions.\n\n" \
" 2) Max Steps -\n" \
" This number dictates how many actions will be taken during each episode before a training\n" \
" is concluded\n" \
" NEW USER TIP:\n" \
" Arbitrarily large numbers of steps don't always work out to better learning. Some of the\n" \
" suggested starting environments have a limited size. As you become more familiar with the \n" \
" environments you will get a feel for how many steps it should take to find an optimal policy\n" \
" and can adjust accordingly. You will learn that some reinforcement learning programs take a \n" \
" large number of episodes (which can include multiple days of training) and an ideal number of\n" \
" steps will allow an agent to run episodes efficiently.\n\n" \
" 3) Select Environment - \n" \
" This drop down menu will allow you to select a reinforcement learning environment\n" \
" from the available python gym sandboxes including a selection of atari games\n" \
" NEW USER TIP:\n" \
" We recommend starting with CartPole, CartPole Discrete, FrozenLake, Pendulum, Acrobat, or \n" \
" MountainCar. As you become more in tune with how these environments work move into the atari\n" \
" games, and if you feel inspired we hope you explore the internet for ways to develop your own\n" \
" environments and interact with our API (Found in our Advanced Options section). \n\n" \
" 4) Select Agent - \n" \
" This drop down menu contains several of the most studied reinforcement learning \n" \
" algorithms to match to your environment. \n" \
" NEW USER TIP: \n" \
" Don't worry, we have installed some guard rails to keep you from matching agents and \n" \
" environments that don't work together. As you study how reinforcement learning works you will \n" \
" understand why those combinations don't work together.\n\n" \
" 5) Set Model -\n" \
" Once you have selected an environment and an agent this will open the training interface.\n\n"
# Help text: the "Training Interface" section of the in-app help dialog.
# Fix: user-facing typo "maximimum" -> "maximum".
Section2 = " - Training Interface - \n\n" \
" 1) Gamma - \n" \
" This value (0 <= X <= 1) represents the discount that the reinforcement learning agents assign to \n" \
" future rewards when calculating the value of a being between one action and the next. How this is\n" \
" done varies from algorithm to algorithm.\n" \
" NEW USER TIP:\n" \
" This value is, in almost all circumstances, very close to one.\n\n" \
" 2) Min/Max Epsilon and Decay Rate -\n" \
" This value represents the proportion of time an agent should choose a random action or consult\n" \
" its policy. A value of 1 means that it will always choose a random action and a value of 0 means it\n" \
" will always consult the policy. This is set to the Max value initially and decrements at\n" \
" intervals by the decay rate during training. When testing these should be set to zero.\n\n" \
" 3) Batch Size -\n" \
" When training a neural network this is the size of the group of actions and rewards that the \n" \
" network will consider each time while training its decision process. In any neural network this is\n" \
" a value that is fine tuned through testing.\n\n" \
" 4) Memory size -\n" \
" This is the maximum number of state, action, and reward tuples that are stored for reference\n" \
" by the agent.\n\n" \
" 5) History Length -\n" \
" Neural networks often rely on information that is gained from its perception of the environment, and\n" \
" in environments that a single image doesn't tell the user much about what is happening in a given\n" \
" state this variable determines the number of chronological frames that a neural network will \n" \
" consider when updating its policy.\n\n" \
" 6) Alpha -\n" \
" Alpha is another name for the learning rate or step size. It defines the impact new information has\n" \
" when overwriting previous values.\n\n" \
" 7) Train -\n" \
" Locks in current parameters and initiates training of the agent. Will display statistical\n" \
" information about the training process in the readout space.\n\n" \
" 8) Halt -\n" \
" Prematurely ends the current session of training or testing.\n\n" \
" 9) Test -\n" \
" Set the agent to perform in the environment exclusively according to the current policy and \n" \
" returns results. NOTE: It is advised that the user sets epsilon to zero during testing at this \n" \
" time. It will occasionally produce a bug otherwise. This will be fixed in a future patch.\n\n" \
" 10) Save Agent -\n" \
" Saves the state of the current agent to a file.\n\n" \
" 11) Load Agent -\n" \
" Opens a file selection window that allows the user to select an agent to be loaded. The agent\n" \
" must match the Agent/Environment combination selected during the Start Screen of the tab.\n\n" \
" 12) Reset -\n" \
" Sets the parameters and the agent state to the default.\n\n" \
" 13) Save Results -\n" \
" Opens a file selection window and allows the user to write a save file containing the results of\n" \
" training or testing an agent/environment interaction as a csv file."
# Help text: the "Tabs and buttons and visuals" section of the in-app help dialog.
Section3 = " TABS AND BUTTONS AND VISUALS\n\n" \
" 1) Tabs - \n" \
" Each tab is a new thread that works on a different agent/environment combination. Add new tabs by \n" \
" clicking the plus button.\n\n" \
" 2) Close Current Tab -\n" \
" This will end the thread being run by the tab and close the tab.\n\n" \
" 3) Reset Current Tab -\n" \
" Ends the thread and sets the tab to its opening default state.\n\n" \
" 4) Load Environment -\n" \
" Opens a file selection window that allows the user to load a custom built environment into the set \n" \
" of environments in the dropdown menu.\n\n" \
" 5) Load Agent -\n" \
" Opens a file selection window and allows the user to load a custom built agent in the set of agents\n" \
" in the dropdown menu.\n\n" \
" 6) Legend -\n" \
" MSE - Mean squared error for recording the loss of an agent/environment interaction\n" \
" Episode Reward - The resulting reward achieved during an episode\n" \
" Epsilon - The current epsilon value during training\n" \
" These contribute to a readout of the performance of an agent/environment interaction.\n\n"
# Help text: the advanced section describing the custom environment/agent API.
API_info = " Advanced - API information: \n\n" \
" The Portal API requires methods for environments and agents as follows -\n\n" \
" - Environment: \n" \
" Must extend the environment abstract class and contain all abstract methods within and as set forth\n" \
" in the API documentation.\n\n" \
" - Agent:\n" \
" Must extend the abstract modelBasedAgent or modelFreeAgent class from the Agents library or one of\n" \
" their child classes and contain all methods therein described by that class and the abstract Agent\n" \
" class and as set forth in the API documentation.\n\n"
# Help text: note about the live-rendering speed slider.
display_episode_speed = "the 'display episode speed' parameter can be changed mid-simulation to adjust how quickly the rendering runs. Keep in mind\nthat each loop is showing the most recent episode, so if you set it high it will often repeat the same episode and if set\nlow it will often skip episodes."
def getHelpGettingStarted():
    """Assemble the complete 'Getting Started' help text from its sections."""
    sections = (Header, Section1, Section2, Section3, API_info, display_episode_speed)
    return "".join(sections)
# getHelpText()
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,607 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/npg.py | import copy
import joblib
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from Agents import policyIteration
from Agents.Policy import approximator, policy
from collections.abc import Iterable
from torch.autograd import Variable
from torch.distributions import Categorical, kl
from torch.nn.utils.convert_parameters import vector_to_parameters, parameters_to_vector
class NPG(policyIteration.PolicyIteration):
    """
    Natural Policy Gradient. A policy iteration agent that updates the
    policy using the policy gradient.
    Adapted from 'https://github.com/zafarali/policy-gradient-methods/
    blob/master/pg_methods/experimental/npg/npg_algorithm.py'.
    """
    displayName = 'NPG'
    # 'Delta' is the normalized step size used to derive the per-update
    # learning rate alpha (see update()).
    newParameters = [policyIteration.PolicyIteration.Parameter('Delta', 0, 0.05, 0.0001, 0.001, True, True, "The normalized step size for computing the learning rate.")]
    parameters = policyIteration.PolicyIteration.parameters + newParameters

    def __init__(self, *args):
        """
        Constructor for Natural Policy Gradient agent.
        """
        # The trailing len(newParameters) positional args belong to this
        # class; everything before them goes to the parent constructor.
        paramLen = len(NPG.newParameters)
        super().__init__(*args[:-paramLen])
        self.delta = float(args[-paramLen])
        '''
        Define the policy.
        '''
        # Create a deep learning approximator. MUST USE PYTORCH!
        approx = approximator.DeepApproximator(self.state_size, self.action_size, [16], library = 'torch')
        # Create a categorical policy with a deep approximator for this agent.
        self._policy = policy.CategoricalPolicy(approx)
        # Baseline approximator to approximate the value function. MUST USE PYTORCH!
        self._value_fn = approximator.DeepApproximator(self.state_size, 1, [16], library = 'torch')

    def choose_action(self, state):
        """
        Chooses an action given the state and, if given, a policy. The
        policy p parameter is optional. If p is None, then the current
        policy of the agent will be used. Otherwise, the given policy p is
        used.
        :param state: is the current state of the environment
        :return: the chosen action
        :rtype: int
        """
        # Choose and return an action using the current policy.
        return self._policy.choose_action(np.asarray(state))

    def update(self, trajectory: Iterable):
        """
        Updates the current policy given a the trajectory of the policy.
        :param trajectory: a list of transition frames from the episode.
        This represents the trajectory of the episode.
        :type trajectory: Iterable
        :return: the loss from this update
        :rtype: float:
        """
        if (not isinstance(trajectory, Iterable)):
            raise ValueError("trajectory must be an Iterable.")
        # Consolidate the state in the trajectory into an array.
        states = np.array([np.asarray(transition.state) for transition in trajectory])
        '''
        Compute the loss as the log-likelihood of the returns.
        '''
        # Calculate the returns.
        returns = self._calculate_returns(trajectory)
        # Calculate the values using the baseline approximator.
        # NOTE(review): assumes self._value_fn(state) returns an indexable
        # with the scalar value at position 0 — confirm against
        # approximator.DeepApproximator.
        values = torch.Tensor([self._value_fn(state)[0] for state in states])
        # Calculate the advantage using the returns and the values.
        advantages = returns - values
        # Compute the loss of the trajectory.
        logits = torch.stack([self._policy.logit(np.asarray(transition.state), transition.action, detach = False) for transition in trajectory]).view(-1)
        loss = (-logits * advantages).mean()
        '''
        Compute the gradient and the natural policy gradient.
        '''
        # Calculate the gradient of the log likelihood loss.
        gradient = self._compute_gradient(loss)
        # Flatten the per-parameter gradients into one vector; the small
        # constant keeps later dot products away from exact zero.
        gradient = parameters_to_vector(gradient).detach().numpy() + 1e-5
        # Calculate the natural policy gradient.
        npg = self._compute_npg(gradient, states)
        '''
        Update the policy and the baseline.
        '''
        # The learning rate to apply for the update (normalized by delta).
        alpha = np.sqrt(np.abs(self.delta / (np.dot(gradient.T, npg.detach().numpy()) + 1e-20)))
        # The amount to change the parameters by.
        update = alpha * npg
        # Calculate and set the new parameters of the policy.
        new_params = parameters_to_vector(self._policy.get_params(False)) - update
        self._policy.set_params(new_params.detach().numpy())
        # Update baseline approximator using the cumulative returns.
        self._value_fn.update(states, returns.detach().numpy().reshape(-1, 1))
        # Return the loss from the update.
        return loss.item()

    def _calculate_returns(self, trajectory: Iterable):
        """
        Calculate the discounted cumualtive rewards of the trajectory.
        :param trajectory: a list of transition for a given trajectory
        :type trajectory: Iterable
        :return: the discounted cumulative returns for each transition
        :rtype: torch.Tensor
        """
        if (not isinstance(trajectory, Iterable)):
            raise ValueError("trajectory must be an Iterable.")
        # Calculate the discounted cumulative rewards for each transition
        # in the trajectory, walking backwards from the terminal reward.
        returns = torch.zeros(len(trajectory))
        returns[-1] = trajectory[-1].reward
        for t in reversed(range(len(trajectory) - 1)):
            # The (1 - is_done) factor zeroes the immediate reward of a
            # terminal transition before bootstrapping from returns[t+1].
            returns[t] = (1 - trajectory[t].is_done) * trajectory[t].reward + self.gamma * returns[t+1]
        # Return the discounted cumulative rewards.
        return returns

    def _compute_gradient(self, output: torch.Tensor):
        """
        Computes the gradient of the given output, using the parameters of
        the policy as the inputs.
        :param output: is the output to compute the gradient of
        :type output: torch.Tensor
        :return: the calculated gradient
        :rtype: torch.Tensor
        """
        if (not isinstance(output, torch.Tensor) or output.requires_grad == False):
            raise ValueError("The output must be a torch.Tensor with a grad_fn.")
        # Zero out the gradient before computing.
        self._policy.zero_grad()
        # Compute and return the gradient of the loss using autograd.
        # create_graph=True keeps the graph so higher-order derivatives
        # (needed by _compute_hvp) remain possible.
        gradient = torch.autograd.grad(output, self._policy.get_params(False), retain_graph=True, create_graph=True)
        return gradient

    def _compute_hvp(self, gradient: np.ndarray, states: np.ndarray, regularization: float = 1e-9):
        """
        Computes the Hessian Vector Product (HVP) of the gradient
        :param gradient: is the gradient to compute the HVP of
        :type gradient: numpy.ndarray
        :param states: is the states of the trajectory from which the
        gradient was calculated
        :type states: numpy.ndarray
        :param regularization: amount of regularization to apply
        :type regularization: float
        :return: the Hessain Vector Product of the gradient
        :rtype: torch.Tensor
        """
        if (not isinstance(gradient, np.ndarray)):
            raise ValueError("gradient must be a numpy array.")
        if (not isinstance(states, np.ndarray) or states.shape[1:] != self.state_size):
            raise ValueError("states must be a numpy array with each state having the shape {}.".format(self.state_size))
        if (not isinstance(regularization, float)):
            raise ValueError("regularization must be a float.")
        # Convert the gradient into a Tensor.
        gradient = torch.from_numpy(gradient).float()
        # Zero out the gradient of the current policy.
        self._policy.zero_grad()
        # Calculate the KL divergence of the old and the new policy distributions.
        # The deep copy freezes a snapshot of the current policy to diff against.
        old_policy = copy.deepcopy(self._policy).get_distribution(states)
        new_policy = self._policy.get_distribution(states, detach = False)
        mean_kl = torch.mean(kl.kl_divergence(new_policy, old_policy))
        # Calculate the gradient of the KL divergence.
        kl_grad = torch.autograd.grad(mean_kl, self._policy.get_params(False), create_graph=True)
        # Calculate the gradient of the KL gradient to the HVP.
        h = torch.sum(parameters_to_vector(kl_grad) * gradient)
        hvp = torch.autograd.grad(h, self._policy.get_params(False))
        # Flatten the HVP into a one-dimensional tensor.
        hvp_flat = np.concatenate([g.contiguous().view(-1).numpy() for g in hvp])
        hvp_flat = torch.from_numpy(hvp_flat)
        # Return the flatten HVP plus the regularized gradient.
        return hvp_flat + regularization * gradient

    def _compute_npg(self, gradient: np.ndarray, states: np.ndarray, iters: int = 1, max_residual: float = 1e-10):
        """
        Computes the Natural Policy Gradient (NPG) of the policy and the
        given gradient.
        Adapted from 'https://github.com/zafarali/policy-gradient-methods/
        blob/f0d83a80ddc772dcad0c851aac9bfd41d436c274/pg_methods/
        conjugate_gradient.py'.
        :param gradient: the gradient to compute the NPG of
        :type gradient: numpy.ndarray
        :param states: the states from the trajectory that generate the
        gradient
        :type states: numpy.ndarray
        :param iters: the number of iteration of conjugation to perform
        :type iters: int
        :param max_residual: the maximum residual allowed during conjugation
        :type max_residual: float
        :return: the Natural Policy Gradient of the policy and the given
        gradient
        :rtype: Torch.tensor
        """
        if (not isinstance(gradient, np.ndarray)):
            raise ValueError("gradient must be a numpy array.")
        if (not isinstance(states, np.ndarray) or states.shape[1:] != self.state_size):
            raise ValueError("states must be a numpy array with each state having the shape {}.".format(self.state_size))
        if (not isinstance(iters, int) or iters < 1):
            raise ValueError("iters must be a positive integer.")
        if (not isinstance(max_residual, float)):
            raise ValueError("regularization must be a float.")
        # Conjugate-gradient solve of F*x = gradient, where multiplication by
        # the Fisher matrix F is provided implicitly by _compute_hvp.
        p = gradient.copy()
        r = gradient.copy()
        x = np.zeros_like(gradient)
        rdotr = r.dot(r)
        for i in range(iters):
            z = self._compute_hvp(p, states)
            v = rdotr / p.dot(z)
            x += v*p
            r -= (v*z).detach().numpy()
            newrdotr = r.dot(r)
            mu = newrdotr/rdotr
            p = r + mu*p
            rdotr = newrdotr
            # Stop early once the residual is small enough.
            if rdotr < max_residual:
                break
        return torch.from_numpy(x)

    def save(self, filename):
        # Persist the policy parameters tagged with the agent name so load()
        # can verify the checkpoint type.
        mem = self._policy.get_params()
        joblib.dump((NPG.displayName, mem), filename)

    def load(self, filename):
        # Restore parameters saved by save(); refuse checkpoints written by a
        # different agent type.
        name, mem = joblib.load(filename)
        if name != NPG.displayName:
            print('load failed')
        else:
            self._policy.set_params(mem)

    def memsave(self):
        # In-memory counterpart of save(): return the raw policy parameters.
        return self._policy.get_params()

    def memload(self, mem):
        # In-memory counterpart of load(): install previously saved parameters.
        self._policy.set_params(mem)
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,608 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/models.py | from Agents import agent, modelFreeAgent
from Agents.deepQ import DeepQ
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2.adam import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Flatten, multiply, Lambda
import sys
class Actor(DeepQ):
    """Policy ("actor") network used by the actor-critic style agents.

    Wraps a Keras model that maps a state to a softmax distribution over
    actions, trained with a KL-divergence loss.
    """

    def __init__(self, state_size, action_size, policy_lr):
        """
        :param state_size: shape of the environment's observation space
        :param action_size: number of discrete actions
        :param policy_lr: learning rate for the policy optimizer
        """
        # NOTE(review): DeepQ.__init__ is deliberately not called; this class
        # only reuses DeepQ as a base for its network helpers — confirm.
        self.state_size = state_size
        self.action_size = action_size
        self.policy_lr = policy_lr
        self.policy_model = self.policy_network()

    def policy_network(self):
        """Build and compile the softmax policy model.

        :return: a compiled Keras Model taking [state, action-mask] inputs.
            The mask input is not connected to the output graph; it is kept
            so callers can feed [state, mask] pairs uniformly.
        """
        inputA = Input(shape=self.state_size)
        inputB = Input(shape=(self.action_size,))
        x = Flatten()(inputA)
        # input_dim is redundant on a functionally-applied layer but is kept
        # to preserve the original model configuration exactly.
        x = Dense(24, input_dim=self.state_size, activation='relu')(x)  # fully connected
        x = Dense(24, activation='relu')(x)
        outputs = Dense(self.action_size, activation='softmax')(x)
        model = Model(inputs=[inputA, inputB], outputs=outputs)
        kl = tf.keras.losses.KLDivergence()
        model.compile(loss=kl, optimizer=Adam(lr=self.policy_lr))
        return model
class Critic(DeepQ):
    """Value ("critic") network used by the actor-critic style agents.

    Wraps a Keras model that maps a state to per-action values, masked by a
    one-hot action input, trained with mean-squared error.
    """

    def __init__(self, state_size, action_size, value_lr):
        """
        :param state_size: shape of the environment's observation space
        :param action_size: number of discrete actions
        :param value_lr: learning rate for the value optimizer
        """
        # NOTE(review): DeepQ.__init__ is deliberately not called; this class
        # only reuses DeepQ as a base for its network helpers — confirm.
        self.state_size = state_size
        self.action_size = action_size
        self.value_lr = value_lr
        self.value_model = self.value_network()

    def value_network(self):
        """Build and compile the masked state-value model.

        :return: a compiled Keras Model taking [state, action-mask] inputs
            and returning the per-action values multiplied elementwise by
            the mask (so only the chosen action's value contributes).
        """
        inputA = Input(shape=self.state_size)
        inputB = Input(shape=(self.action_size,))
        x = Flatten()(inputA)
        # input_dim is redundant on a functionally-applied layer but is kept
        # to preserve the original model configuration exactly.
        x = Dense(24, input_dim=self.state_size, activation='relu')(x)  # fully connected
        x = Dense(24, activation='relu')(x)
        x = Dense(self.action_size, activation='linear')(x)
        outputs = multiply([x, inputB])
        model = Model(inputs=[inputA, inputB], outputs=outputs)
        model.compile(loss='mse', optimizer=Adam(lr=self.value_lr))
        return model
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,609 | RobertCordingly/easyRL-v0 | refs/heads/master | /Agents/policyIteration.py | from Agents import modelBasedAgent
from abc import ABC, abstractmethod
from collections.abc import Iterable
class PolicyIteration(modelBasedAgent.ModelBasedAgent, ABC):
    """Abstract base class for policy-iteration style agents.

    Concrete subclasses install a policy object in ``self._policy`` and
    implement :meth:`update`, which adjusts that policy from the trajectory
    of one episode.
    """
    displayName = 'Policy Iteration Method'
    newParameters = []
    parameters = modelBasedAgent.ModelBasedAgent.parameters + newParameters

    def __init__(self, *args):
        super().__init__(*args)
        # Subclasses replace this placeholder with their concrete policy.
        self._policy = None

    @abstractmethod
    def update(self, trajectory: Iterable):
        """Update the current policy from an episode trajectory.

        :param trajectory: transition frames making up one episode.
        :type trajectory: Iterable
        """
| {"/Agents/adrqn.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/rainbow.py": ["/Agents/deepQ.py", "/Agents/Collections/TransitionFrame.py"], "/Custom Agents/DeepSARSA.py": ["/Agents/deepQ.py"], "/Agents/Policy/policy.py": ["/Agents/Policy/approximator.py"], "/Agents/deepQ.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/ddpg.py": ["/Agents/Collections/TransitionFrame.py"], "/Custom Environments/drugDosing/drugDosingEnv.py": ["/Environments/environment.py"], "/Agents/Collections/ExperienceReplay.py": ["/Agents/Collections/TransitionFrame.py"], "/Agents/sac.py": ["/Agents/Collections/TransitionFrame.py"], "/MVC/terminalView.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/ppo.py": ["/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/view.py": ["/MVC/model.py", "/Agents/sarsa.py"], "/Agents/trpo.py": ["/Agents/ppo.py", "/Agents/deepQ.py", "/Agents/models.py", "/Agents/Collections/TransitionFrame.py"], "/MVC/model.py": ["/Agents/Collections/TransitionFrame.py"], "/webpage/easyRL_app/views.py": ["/webpage/easyRL_app/models.py"], "/Agents/models.py": ["/Agents/deepQ.py"]} |
75,610 | RobertCordingly/easyRL-v0 | refs/heads/master | /Environments/mountainCarEnv.py | from Environments import classicControlEnv
import gym
from PIL import Image, ImageDraw
from math import cos, sin
import numpy as np
class MountainCarEnv(classicControlEnv.ClassicControlEnv):
    """Wrapper around Gym's 'MountainCar-v0' classic-control task with a
    custom PIL-based renderer and a position-shaped step reward."""
    displayName = 'Mountain Car'

    def __init__(self):
        # Underlying Gym environment plus cached action/state dimensions.
        self.env = gym.make('MountainCar-v0')
        self.action_size = self.env.action_space.n
        self.state_size = self.env.observation_space.shape

    def step(self, action):
        # Reward shaping: adds a small bonus proportional to the car's
        # x-position to the parent class's step result.
        # NOTE(review): this adds a float to whatever super().step() returns
        # and reads self.state, which is never assigned in this class (the
        # Gym state lives in self.env.state) — looks broken; confirm against
        # classicControlEnv.ClassicControlEnv.step before relying on it.
        return super().step(action) + 0.1*self.state[0]

    def height(self, xs):
        # MountainCar track profile: y = sin(3x) * 0.45 + 0.55.
        return np.sin(3 * xs) * .45 + .55

    def rotateTrans(self, x, y, tx, ty, ang):
        # Rotate (x, y) by -ang and translate by (tx, ty). The image is
        # flipped vertically at the end of render(), hence the sign choices.
        return tx + x * cos(-ang) + y * sin(-ang), ty - x * sin(-ang) + y * cos(-ang)

    def render(self):
        """Draw the current environment state to a 600x400 PIL Image."""
        screen_width = 600
        screen_height = 400
        world_width = self.env.max_position - self.env.min_position
        scale = screen_width/world_width
        carwidth=40
        carheight=20

        image = Image.new('RGB', (screen_width, screen_height), 'white')
        draw = ImageDraw.Draw(image)

        # Track: sample the height profile and draw it as a polyline.
        xs = np.linspace(self.env.min_position, self.env.max_position, 100)
        ys = self.height(xs)
        xys = list(zip((xs - self.env.min_position) * scale, ys * scale))

        draw.line(xys, fill='black')

        # Car body: a rectangle rotated by the local track slope and placed
        # at the car's current position.
        pos = self.env.state[0]
        carx, cary = (pos - self.env.min_position)*scale, self.height(pos)*scale
        rot = cos(3 * pos)
        clearance = 10

        l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
        x1,y1 = l, b+clearance
        x2,y2 = l, t+clearance
        x3,y3 = r,t+clearance
        x4,y4 = r,b+clearance
        draw.polygon([self.rotateTrans(x1, y1, carx, cary, rot), self.rotateTrans(x2, y2, carx, cary, rot), self.rotateTrans(x3, y3, carx, cary, rot), self.rotateTrans(x4, y4, carx, cary, rot)], fill='black')

        # Front wheel (gray disc).
        rad = carheight/2.5
        x1 = carwidth / 4
        y1 = clearance
        x1, y1 = self.rotateTrans(x1, y1, carx, cary, rot)
        draw.chord([x1-rad, y1-rad, x1+rad, y1+rad], 0, 360, fill=(127, 127, 127))

        # Rear wheel (gray disc).
        rad = carheight/2.5
        x1 = -carwidth / 4
        y1 = clearance
        x1, y1 = self.rotateTrans(x1, y1, carx, cary, rot)
        draw.chord([x1-rad, y1-rad, x1+rad, y1+rad], 0, 360, fill=(127, 127, 127))

        # Goal flag: pole plus a triangular pennant at the goal position.
        flagx = (self.env.goal_position - self.env.min_position) * scale
        flagy1 = self.height(self.env.goal_position) * scale
        flagy2 = flagy1 + 50
        draw.line([(flagx, flagy1), (flagx, flagy2)], fill=(204, 204, 0))
        draw.polygon([(flagx, flagy2), (flagx, flagy2-10), (flagx+25, flagy2-5)], fill=(204,204,0))

        # Drawing used a y-up coordinate system; flip vertically for display.
        return image.transpose(method=Image.FLIP_TOP_BOTTOM)
75,612 | preinh/seisPortal | refs/heads/master | /portal/__init__.py | # -*- coding: utf-8 -*-
"""The portal package"""
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,613 | preinh/seisPortal | refs/heads/master | /portal/public/images/heli/get_streams.py | from seiscomp3 import Client, IO, Core, DataModel
# One-off Python 2 maintenance script: lists every NET.STA.LOC.CHA stream
# code found in the SeisComP3 inventory database.
#dbDriverName="mysql"
#dbAddress="sysop:sysop@localhost/seiscomp3"
#dbPlugin = "dbmysql"
# Production database connection settings.
dbDriverName="postgresql"
dbAddress="sysop:sysop@seisDB.iag.usp.br/sc_request"
dbPlugin = "dbpostgresql"
# Get global plugin registry
registry = Client.PluginRegistry.Instance()
# Add plugin dbmysql
registry.addPluginName(dbPlugin)
# Load all added plugins
registry.loadPlugins()
# Create dbDriver
dbDriver = IO.DatabaseInterface.Create(dbDriverName)
# Open Connection
# NOTE(review): both Open() and connect() are called on the same address;
# other scripts in this repo comment out the Open() call — one of the two
# is likely redundant. Confirm against the seiscomp3 IO API.
dbDriver.Open(dbAddress)
dbDriver.connect(dbAddress)
# Create query
dbQuery = DataModel.DatabaseQuery(dbDriver)
# Load the whole inventory tree and walk network -> station -> sensor
# location -> stream, printing one fully-qualified stream id per line.
inventory = dbQuery.loadInventory()
for i in range(inventory.networkCount()):
    net = inventory.network(i)
    for j in range(net.stationCount()):
        station = net.station(j)
        for k in range(station.sensorLocationCount()):
            location = station.sensorLocation(k)
            for l in range(location.streamCount()):
                stream = location.stream(l)
                print net.code() + "." + station.code() + "." + location.code() + "." + stream.code()
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,614 | preinh/seisPortal | refs/heads/master | /portal/model/stations.py | # -*- coding: utf-8 -*-
from portal.lib import app_globals as appg
import psycopg2
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
from datetime import datetime
#from seiscomp3 import Client, IO, Core, DataModel
class Stations(object):
    """Read-only access to the station inventory stored in the SeisComP3
    PostgreSQL database, plus JSON/GeoJSON serializers for the web UI."""

    def __init__(self):
        self.details = []        # per-channel rows from the last getDetails()
        self.stations_list = []  # cached summary rows from the last getAll()

    def getAll(self, filter=""):
        """Return one summary row per station with its channel count.

        :param filter: extra SQL appended verbatim to the WHERE clause.
            SECURITY NOTE: this fragment is interpolated into the query, so
            it must never be built from untrusted input.
        :return: list of dicts with keys NN, SSSSS, desc, lat, lon, ele, n_ch.
        """
        self.stations_list = []
        # Connect to an existing database
        conn = psycopg2.connect(dbname="seisrequest_v7", user="sysop", password="sysop", host="10.110.0.130")
        # Open a cursor to perform database operations
        cur = conn.cursor()
        # Query the database and obtain data as Python objects
        cur.execute("""
        SELECT net.m_code as net,
               station.m_code as sta,
               station.m_description as desc,
               station.m_latitude as lat,
               station.m_longitude as lon,
               station.m_elevation as elev,
               count(stream.m_code) as channels
          FROM station,
               network as net,
               sensorlocation as sl LEFT OUTER JOIN stream ON (stream._parent_oid = sl._oid )
         WHERE sl._parent_oid = station._oid
           AND station._parent_oid = net._oid
           %s
         GROUP BY net.m_code,
               station.m_code,
               station.m_description,
               station.m_longitude,
               station.m_latitude,
               station.m_elevation
         ORDER BY net, sta;
        """ % (filter))
        for line in cur:
            self.stations_list.append(dict(
                NN = line[0],
                SSSSS = line[1],
                desc = line[2],
                lat = ("%.2f") % line[3],
                lon = ("%.2f") % line[4],
                ele = ("%.1f") % line[5],
                n_ch = line[6],
            ))
        # Close communication with the database
        cur.close()
        conn.close()
        return self.stations_list

    def getAllJson(self):
        """Serialize the cached station list (from getAll) as a JavaScript
        statement declaring a GeoJSON-like feature array."""
        json = ""
        for sta in self.stations_list:
            element = """
            {
                "type": "Feature",
                "properties": {
                    "net": "%s",
                    "sta": "%s",
                    "desc": "%s"
                },
                "geometry": {
                    "type": "Point",
                    "coordinates": [%.4f , %.4f ]
                }
            }
            """ % (sta['NN'], sta['SSSSS'], sta['desc'], float(sta['lon']), float(sta['lat']))
            json += element + ","
        # Drop the trailing comma before closing the array.
        json = "var stations_gj = [" + json[ : -1] + "];"
        return json

    def getDetails(self, sid=None):
        """Return per-channel details for one station.

        :param sid: identifier shaped like '.../NN_SSSSS'
        :return: dict with keys 'error' (empty on success) and 'details'
        """
        try:
            ss = sid.split('_')[-1]
            nn = (sid.split('_')[-2]).split('/')[-1]
            if not nn or not ss:
                return dict(error="Station Not Found",
                            details=[],
                            )
        except Exception:
            return dict(error="Out of pattern NN_SSSSS " + str(sid),
                        details=[],
                        )
        # Connect to an existing database
        conn = psycopg2.connect(dbname="seisrequest_v7", user="sysop", password="sysop", host="10.110.0.130")
        cur = conn.cursor()
        # Parameterized query: nn/ss come from the request path, so let the
        # driver quote them instead of interpolating strings into the SQL
        # (prevents SQL injection).
        query = """
        SELECT net.m_code as net,
               station.m_code as sta,
               sl.m_code as loc,
               stream.m_code as cha,
               stream.m_start as cha_sta,
               coalesce(stream.m_end, date_trunc('seconds', now()::timestamp)) as cha_end,
               station.m_description as desc,
               station.m_latitude as lat,
               station.m_longitude as lon,
               station.m_elevation as elev
          FROM station,
               network as net,
               sensorlocation as sl,
               stream
         WHERE stream._parent_oid = sl._oid
           AND sl._parent_oid = station._oid
           AND station._parent_oid = net._oid
           AND net.m_code = %s
           AND station.m_code = %s
         ORDER BY station.m_code;
        """
        print(query)
        cur.execute(query, (nn, ss))
        self.details = []
        for line in cur:
            # Empty location codes are shown as '--' in the image filename.
            # (Bug fix: the previous code used line[2].replace("", "--"),
            # which inserts '--' between every character of a non-empty code.)
            loc_code = line[2] if line[2] else "--"
            png = "%s.%s.%s.%s.ALL.png" % (line[0], line[1], loc_code, line[3])
            self.details.append(dict(NN=line[0],
                                     SSSSS=line[1],
                                     LL=line[2],
                                     CCC=line[3],
                                     desc = line[6],
                                     lat = ("%.4f") % line[7],
                                     lon = ("%.4f") % line[8],
                                     ele = ("%.1f") % line[9],
                                     png = "%s.%s/%s" % (line[0], line[1], png),
                                     t0 = ("%sZ" % (line[4])).replace(" ", "T"),
                                     tf = ("%sZ" % (line[5])).replace(" ", "T"),
                                     ))
        # Close communication with the database
        cur.close()
        conn.close()
        if self.details == []:
            return dict(error="unable to get streams",
                        details=[],
                        )
        return dict(error="",
                    details=self.details,
                    )

    def __repr__(self):
        return ('<Stations>').encode('utf-8')

    def __unicode__(self):
        return "Stations"
#"""
#scheli capture -I "combined://seisrequest.iag.usp.br:18000;seisrequest.iag.usp.br:18001"
# --offline --amp-range=1E3 --stream BL.AQDB..HHZ -N -o saida.png
#"""
# scxmldump $D -E iag-usp2012ioiu | scmapcut --ep - -E iag-usp2012ioiu -d 1024x768 -m 5 --layers -o evt.png
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,615 | preinh/seisPortal | refs/heads/master | /portal/lib/app_globals.py | # -*- coding: utf-8 -*-
"""The application's Globals object"""
#from seiscomp3 import Client, IO, Core, DataModel
__all__ = ['Globals']
#def singleton(cls):
# instances = {}
# def getinstance():
# if cls not in instances:
# instances[cls] = cls()
# return instances[cls]
# return getinstance
#
#@singleton
#class singleInventory():
#
# debug = False
#
# if debug:
## dbDriverName="postgresql"
## dbAddress="sysop:sysop@localhost/seiscomp3"
## dbPlugin = "dbpostgresql"
# dbDriverName="mysql"
# dbAddress="sysop:sysop@localhost/seiscomp3"
# dbPlugin = "dbmysql"
# else:
# dbDriverName="postgresql"
# dbAddress="sysop:sysop@10.110.0.130/sc_request"
# dbPlugin = "dbpostgresql"
#
# # Get global plugin registry
# registry = Client.PluginRegistry.Instance()
# # Add plugin dbmysql
# registry.addPluginName(dbPlugin)
# # Load all added plugins
# registry.loadPlugins()
# # Create dbDriver
# dbDriver = IO.DatabaseInterface.Create(dbDriverName)
# # Open Connection
# #dbDriver.Open(dbAddress)
# dbDriver.connect(dbAddress)
# # set Query object
# dbQuery = DataModel.DatabaseQuery(dbDriver)
#
# inventory = dbQuery.loadInventory()
#
#
# def __repr__(self):
# return ('Inventory:Global')
#
# def __unicode__(self):
# return "Inventory_Global"
class Globals(object):
    """Container for objects shared across the application's lifetime.

    One instance of Globals is created during application initialization and
    is available during requests via the 'app_globals' variable.
    """

    def __init__(self):
        # Nothing to initialize by default; attributes are attached later.
        pass
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,616 | preinh/seisPortal | refs/heads/master | /portal/model/bsb.py | # -*- coding: utf-8 -*-
#import os
#import sys
#
#from datetime import datetime
#from sqlalchemy import Table, ForeignKey, Column
#from sqlalchemy.orm import relation, synonym
#from sqlalchemy.types import Unicode, Integer, DateTime
#
#from portal.model import DeclarativeBase, metadata, DBSession
import sys
import psycopg2
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
#from seiscomp3 import Client, IO, Core, DataModel
import commands
from datetime import datetime, timedelta
class BoletimSismico(object):
debug = False
def __init__(self):
if self.debug:
self.dbDriverName="postgresql"
self.dbAddress="sysop:sysop@localhost/seiscomp3"
self.dbPlugin = "dbpostgresql"
else:
self.dbDriverName="postgresql"
self.dbAddress="sysop:sysop@10.110.0.130/master_sc3"
self.dbPlugin = "dbpostgresql"
before = 3*365*100
self.e = datetime.utcnow()
self.b = self.e - timedelta(days=before)
self.events_list = []
    def getAll(self, filter="", limit=None):
        """
        Query the bulletin view and cache the event list on this instance.

        :param filter: extra SQL appended verbatim to the WHERE clause.
            SECURITY NOTE: interpolated directly into the query string, so
            it must never be built from untrusted input.
        :param limit: maximum number of events to return (None = all).
        :return: list of event dicts, newest first.
        """
        self.events_list = []
        # Connect to an existing database
        conn = psycopg2.connect(dbname="master_sc3", user="sysop", password="sysop", host="10.110.0.130")
        # Open a cursor to perform database operations
        cur = conn.cursor()
        # Query the database and obtain data as Python objects.
        # The window [self.b, self.e] comes from __init__; the caller's
        # filter fragment is appended last.
        cur.execute("""
        SELECT m_publicid AS eventid,
               m_text AS "desc",
               'IAG' AS agency,
               m_time_value AS "time",
               m_latitude_value AS lat,
               m_longitude_value AS lon,
               m_depth_value AS depth,
               m_magnitude_value AS mag,
               m_type AS mag_type,
               coalesce(m_stationcount,0) AS mag_count,
               m_evaluationmode AS status,
               author
          FROM gis_bsb_mv
         WHERE m_magnitude_value::numeric >= 0
           AND m_time_value <> date '1970-01-01 00:00:00'
           AND m_time_value >= '%s'
           AND m_time_value <= '%s'
           %s
         ORDER BY time DESC;
        """ % (self.b, self.e, filter))
        for line in cur:
            evt = line[0]
            desc = line[1]
            _time = line[3]
            lat= ("%.2f") % line[4]
            lon= ("%.2f") % line[5]
            dep= ("%d") % line[6]
            val = line[7]
            typ = line[8]
            stc = line[9]
            status = line[10]
            author = line[11]
            # Magnitude display string: "value type (station count)"; fall
            # back to '--' when any of the three fields is NULL.
            try:
                _mag = ("%.1f %s (%d)") % (val, typ, stc)
            except:
                _mag = u"--"
            d = dict(id=evt,
                     desc= desc,
                     time= _time,
                     lat= lat,
                     lon= lon,
                     dep= dep,
                     mag= _mag,
                     status = status,
                     author = author,
                     )
            self.events_list.append(d)
        # Close communication with the database
        cur.close()
        conn.close()
        #return sorted(self.events_list, key=lambda event: event['time'], reverse=True)
        return self.events_list[0:limit]
def getAllGeoJson(self, limit=None):
json=""
try:
for d in self.events_list[0:limit]:
json += """
{
"type": "Feature",
"properties": {
"id": '%s',
"mag": '%s',
"desc": "%s",
"time": '%s',
},
"geometry": {
"type": "Point",
"coordinates": [ %f , %f ],
}
},
""" % (d['id'], d['mag'], d['desc'], d['time'], float(str(d['lon'])), float(str(d['lat'])))
json = "var geojson_bsb = [" + json[:] + "];"
except:
print "Unexpected error:", sys.exc_info()[0]
json = "var geojson_bsb = [ ];"
pass
return json
def getLastGeoJson(self):
json=""
try:
d = self.events_list[0]
json = """
{
"type": "Feature",
"properties": {
"id": "%s",
"mag": "%s",
"desc": "%s",
"time": "%s"
},
"geometry": {
"type": "Point",
"coordinates": [%f, %f]
}
},
"""%(d['id'], d['mag'], d['desc'],d['time'], float(str(d['lon'])), float(str(d['lat'])))
json = "var geojson_bsb_l = [" + json[:] + "];"
except:
print d['id'], d['mag'], d['desc'],d['time'], float(str(d['lon'])), float(str(d['lat']))
json = "var geojson_bsb_l = [ ];"
pass
return json
def getDetails(self, eid=None):
r = {}
if not eid:
r = dict(error="Invalid ID")
return r
#evt = self.dbQuery.getEventByPublicID(eid)
# if not evt:
# r = dict(error="Event not Found")
# return r
cmd = "/home/suporte/seiscomp3/bin/seiscomp exec scbulletin -E %s -3 --extra -d '%s://%s'" % (eid, self.dbDriverName, self.dbAddress)
out = commands.getstatusoutput(cmd)
out_lines = out[1]
out_lines = out_lines.split('\n')
r = dict(error="",
eid=eid,
t = out_lines,
)
return r
# def _createQuery(self):
# # Get global plugin registry
# self.registry = Client.PluginRegistry.Instance()
# # Add plugin dbmysql
# self.registry.addPluginName(self.dbPlugin)
# # Load all added plugins
# self.registry.loadPlugins()
# # Create dbDriver
# self.dbDriver = IO.DatabaseInterface.Create(self.dbDriverName)
# # Open Connection
# #dbDriver.Open(dbAddress)
# self.dbDriver.connect(self.dbAddress)
# # set Query object
# return DataModel.DatabaseQuery(self.dbDriver)
def __repr__(self):
return ('<BoletimSismico: start=%s end=%s>' % str(self.s), str(self.e)).encode('utf-8')
def __unicode__(self):
return "BoletimSismico Model"
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,617 | preinh/seisPortal | refs/heads/master | /portal/controllers/__init__.py | # -*- coding: utf-8 -*-
"""Controllers for the portal application."""
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,618 | preinh/seisPortal | refs/heads/master | /portal/controllers/events.py | #from tg import expose, request
from tg import expose, flash, require, url, lurl, request, redirect
from tg.i18n import ugettext as _, lazy_ugettext as l_
from portal.lib.base import BaseController
from portal import model
from itertools import cycle
__all__ = ['EventsController']
import tw2.core as twc
#import tw2.forms as twf
#import tw2.dynforms as twd
#import tw2.jqplugins.ui as jqui
from datetime import datetime
#from eventsForms import EventFilterForm
import eventsForms as ef
import bsbForms as bf
#class Event_Page(twc.Page):
# title = "page"
# child = ef.EventFilterForm()
class EventsController(BaseController):
    # Events pages: a filterable world-event list plus the Brazilian bulletin
    # ("bsb") list, both rendered by the portal.templates.events template.

    @expose('portal.templates.events')
    def index(self, *args, **kw):
        """Handle the events page."""
        e = model.events.Events()
        filter = ""
        dat = {}
        # Translate submitted form fields into SQL WHERE fragments for the
        # world-events query (keys without the bsb_ prefix); date bounds are
        # applied by mutating the model's time window directly.
        if kw != {}:
            for k, v in kw.iteritems():
                dat[k]=v
                if v != '':
                    if k == "mag_f":
                        filter += " AND magnitude.m_magnitude_value >= %f " % (float(v))
                    elif k == "mag_t":
                        filter += " AND magnitude.m_magnitude_value <= %f " % (float(v))
                    elif k == "dep_f":
                        filter += " AND origin.m_depth_value >= %f " % (float(v))
                    elif k == "dep_t":
                        filter += " AND origin.m_depth_value <= %f " % (float(v))
                    elif k == "lat_f":
                        filter += " AND origin.m_latitude_value >= %f " % (float(v))
                    elif k == "lat_t":
                        filter += " AND origin.m_latitude_value <= %f " % (float(v))
                    elif k == "lon_f":
                        filter += " AND origin.m_longitude_value >= %f " % (float(v))
                    elif k == "lon_t":
                        filter += " AND origin.m_longitude_value <= %f " % (float(v))
                    elif k == "date_f":
                        e.b = datetime.strptime(v, "%d-%m-%Y %H:%M")
                    elif k == "date_t":
                        e.e = datetime.strptime(v, "%d-%m-%Y %H:%M")
        event_list = e.getAll(filter=filter)
        geojson = e.getAllGeoJson()
        geojson_l = e.getLastGeoJson()
        f = ef.EventFilterForm().req()
        b = model.bsb.BoletimSismico()
        bsb_filter = ""
        bsb_dat = {}
        # Same translation for the bulletin form (bsb_-prefixed keys).
        if kw != {}:
            for k, v in kw.iteritems():
                bsb_dat[k]=v
                if v != '':
                    if k == "bsb_mag_f":
                        bsb_filter += " AND m_magnitude_value >= %f " % (float(v))
                    elif k == "bsb_mag_t":
                        bsb_filter += " AND m_magnitude_value <= %f " % (float(v))
                    elif k == "bsb_dep_f":
                        bsb_filter += " AND m_depth_value >= %f " % (float(v))
                    elif k == "bsb_dep_t":
                        bsb_filter += " AND m_depth_value <= %f " % (float(v))
                    elif k == "bsb_lat_f":
                        bsb_filter += " AND m_latitude_value >= %f " % (float(v))
                    elif k == "bsb_lat_t":
                        bsb_filter += " AND m_latitude_value <= %f " % (float(v))
                    elif k == "bsb_lon_f":
                        bsb_filter += " AND m_longitude_value >= %f " % (float(v))
                    elif k == "bsb_lon_t":
                        bsb_filter += " AND m_longitude_value <= %f " % (float(v))
                    elif k == "bsb_date_f":
                        b.b = datetime.strptime(v, "%d-%m-%Y %H:%M")
                    elif k == "bsb_date_t":
                        b.e = datetime.strptime(v, "%d-%m-%Y %H:%M")
        bsb_list = b.getAll(limit=60, filter=bsb_filter)
        # NOTE(review): this overwrites the geojson_l computed above from the
        # world events -- confirm the template really wants the bulletin layer.
        geojson_l = b.getAllGeoJson(limit=60)
        bsb_f = bf.BsbFilterForm().req()
        return dict(page = 'events',
                    filterForm = f,
                    bsbFilterForm = bsb_f,
                    data = dat,
                    bsb_data = bsb_dat,
                    events = event_list,
                    bsb = bsb_list,
                    cycle = cycle,
                    geojson = geojson,
                    geojson_l = geojson_l,
                    evt_png = url("/images/event.png"),
                    last_evt_png = url("/images/star2.png"),
                    )

    @expose('portal.templates.events')
    def events(self, *args, **kw):
        """Handle the events page (unfiltered variant of index)."""
        e = model.events.Events()
        event_list = e.getAll()
        geojson = e.getAllGeoJson()
        #geojson_l = e.getLastGeoJson()
        #json_l = e.getLastJson()
        b = model.bsb.BoletimSismico()
        bsb_list = b.getAll()
        geojson_l = b.getLastGeoJson()
        f = ef.EventFilterForm().req()
        bsb_f = bf.BsbFilterForm().req()
        return dict(page='events',
                    filterForm = f,
                    bsbFilterForm = bsb_f,
                    events = event_list,
                    bsb = bsb_list,
                    cycle = cycle,
                    geojson = geojson,
                    geojson_l = geojson_l,
                    evt_png = url("/images/event.png"),
                    last_evt_png = url("/images/star2.png"),
                    )

    @expose('portal.templates.event')
    def _default(self, came_from=url('/'), *args, **kw):
        # Catch-all: any unmatched path segment is treated as an event id and
        # rendered on the single-event detail page.
        id = came_from
        event_details = model.events.Events().getDetails(id)
        f = ef.EventFilterForm().req()
        bsb_f = bf.BsbFilterForm().req()
        return dict(page='event',
                    filterForm=f,
                    bsbFilterForm= bsb_f,
                    d = event_details)
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,619 | preinh/seisPortal | refs/heads/master | /portal/model/events.py | # -*- coding: utf-8 -*-
#import os
#import sys
#
#from datetime import datetime
#from sqlalchemy import Table, ForeignKey, Column
#from sqlalchemy.orm import relation, synonym
#from sqlalchemy.types import Unicode, Integer, DateTime
#
#from portal.model import DeclarativeBase, metadata, DBSession
import psycopg2
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
#from seiscomp3 import Client, IO, Core, DataModel
import commands
from datetime import datetime, timedelta
class Events(object):
debug = False
def __init__(self):
if self.debug:
self.dbDriverName="postgresql"
self.dbAddress="sysop:sysop@localhost/seiscomp3"
self.dbPlugin = "dbpostgresql"
else:
self.dbDriverName="postgresql"
self.dbAddress="sysop:sysop@10.110.0.130/master_sc3"
self.dbPlugin = "dbpostgresql"
daysBefore = 20
self.e = datetime.utcnow()
self.b = self.e - timedelta(days=daysBefore)
self.events_list = []
def getAll(self, filter=""):
self.events_list = []
# Connect to an existing database
conn = psycopg2.connect(dbname="master_sc3", user="sysop", password="sysop", host="10.110.0.130")
# Open a cursor to perform database operations
cur = conn.cursor()
# Query the database and obtain data as Python objects
cur.execute("""
SELECT pevent.m_publicid AS eventid,
eventdescription.m_text AS "desc",
event.m_creationinfo_agencyid AS agency,
origin.m_time_value AS "time",
origin.m_latitude_value AS lat,
origin.m_longitude_value AS lon,
origin.m_depth_value AS depth,
magnitude.m_magnitude_value AS mag,
magnitude.m_type AS mag_type,
coalesce(magnitude.m_stationcount, 0) AS mag_count,
case
when origin.m_evaluationmode = 'automatic' then 'A'
when origin.m_evaluationmode = 'manual' then 'M'
else 'U'
end AS status,
origin.m_creationinfo_author as author
FROM event LEFT OUTER JOIN publicobject pmagnitude
ON (event.m_preferredmagnitudeid::text = pmagnitude.m_publicid::text),
publicobject pevent,
origin,
publicobject porigin,
magnitude,
eventdescription
WHERE event._oid = pevent._oid
AND origin._oid = porigin._oid
AND magnitude._oid = pmagnitude._oid
AND event.m_preferredoriginid::text = porigin.m_publicid::text
AND coalesce(event.m_type, '') not in ('not existing', 'outside of network interest')
AND eventdescription._parent_oid = pevent._oid
AND origin.m_time_value >= '%s'
AND origin.m_time_value <= '%s'
%s
ORDER BY time DESC;
""" % (self.b, self.e, filter))
for line in cur:
evt = line[0]
desc = line[1]
_time = line[3]
lat= ("%.2f") % line[4]
lon= ("%.2f") % line[5]
dep= ("%d") % line[6]
val = line[7]
typ = line[8]
stc = line[9]
status = line[10]
author = line[11]
try:
_mag = ("%.1f %s (%d)") % (val, typ, stc)
except:
_mag = u"--"
d = dict(id=evt,
desc= desc,
time= _time,
lat= lat,
lon= lon,
dep= dep,
mag= _mag,
status = status,
author = author
)
self.events_list.append(d)
# Close communication with the database
cur.close()
conn.close()
#return sorted(self.events_list, key=lambda event: event['time'], reverse=True)
return self.events_list
def getAllGeoJson(self, limit=None):
geojson=""
try:
for d in self.events_list[0:limit]:
geojson+= """
{
"type": "Feature",
"properties": {
"id": "%s",
"mag": "%s",
"desc": "%s",
"time": "%s"
},
"geometry": {
"type": "Point",
"coordinates": [%f, %f]
}
}, """%(d['id'], d['mag'], d['desc'],d['time'], float(str(d['lon'])), float(str(d['lat'])))
geojson = "var geojson = [" + geojson[:] + "];"
except:
print geojson
pass
return geojson
def getLastGeoJson(self):
json=""
try:
d = self.events_list[0]
json = """
{
"type": "Feature",
"properties": {
"id": "%s",
"mag": "%s",
"desc": "%s",
"time": "%s"
},
"geometry": {
"type": "Point",
"coordinates": [%f, %f]
}
},
"""%(d['id'], d['mag'], d['desc'],d['time'], float(str(d['lon'])), float(str(d['lat'])))
json = "var geojson_l = [" + json[:] + "];"
except:
json = "var geojson_l = [ ];"
print json
pass
return json
def getDetails(self, eid=None):
r = {}
if not eid:
r = dict(error="Invalid ID")
return r
#evt = self.dbQuery.getEventByPublicID(eid)
# if not evt:
# r = dict(error="Event not Found")
# return r
cmd = "/home/suporte/seiscomp3/bin/seiscomp exec scbulletin -E %s -3 --extra -d '%s://%s'" % (eid, self.dbDriverName, self.dbAddress)
out = commands.getstatusoutput(cmd)
out_lines = out[1]
out_lines = out_lines.split('\n')
r = dict(error="",
eid=eid,
t = out_lines,
)
return r
# def _createQuery(self):
# # Get global plugin registry
# self.registry = Client.PluginRegistry.Instance()
# # Add plugin dbmysql
# self.registry.addPluginName(self.dbPlugin)
# # Load all added plugins
# self.registry.loadPlugins()
# # Create dbDriver
# self.dbDriver = IO.DatabaseInterface.Create(self.dbDriverName)
# # Open Connection
# #dbDriver.Open(dbAddress)
# self.dbDriver.connect(self.dbAddress)
# # set Query object
# return DataModel.DatabaseQuery(self.dbDriver)
def __repr__(self):
return ('<Events: start=%s end=%s>' % str(self.s), str(self.e)).encode('utf-8')
def __unicode__(self):
return "Events Model"
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,620 | preinh/seisPortal | refs/heads/master | /portal/controllers/root.py | # -*- coding: utf-8 -*-
"""Main Controller"""
from tg import expose, flash, require, url, lurl, request, redirect
from tg.i18n import ugettext as _, lazy_ugettext as l_
from itertools import cycle
from portal import model
from repoze.what import predicates
from portal.controllers.secure import SecureController
from portal.model import DBSession, metadata
from tgext.admin.tgadminconfig import TGAdminConfig
from tgext.admin.controller import AdminController
from portal.lib.base import BaseController
from portal.controllers.error import ErrorController
from portal.controllers.events import EventsController
from portal.controllers.stations import StationsController
from portal.controllers.bsb import BsbController
import urllib2
from json import loads
import psycopg2
import psycopg2.extras
import json
import collections
from datetime import datetime, timedelta
import calendar
__all__ = ['RootController']
class RootController(BaseController):
    """
    The root controller for the portal application.

    All the other controllers and WSGI applications should be mounted on this
    controller. For example::

        panel = ControlPanelController()
        another_app = AnotherWSGIApplication()

    Keep in mind that WSGI applications shouldn't be mounted directly: They
    must be wrapped around with :class:`tg.controllers.WSGIAppController`.
    """
    # Sub-controllers mounted on the root URL space.
    secc = SecureController()
    admin = AdminController(model, DBSession, config_type=TGAdminConfig)
    error = ErrorController()
    events = EventsController()
    bsb = BsbController()
    stations = StationsController()

    # NOTE(review): stacking three @expose decorators registers three
    # templates for the same method -- confirm the m_world/m_bsb
    # registrations are intentional (they have dedicated methods below).
    @expose('portal.templates.index')
    @expose('portal.templates.m_world')
    @expose('portal.templates.m_bsb')
    def index(self):
        # Front page: latest 8 world events plus latest 20 bulletin events,
        # each with its own GeoJSON map layer.
        e = model.events.Events()
        event_list = e.getAll()
        geojson = e.getAllGeoJson(8)
        b = model.bsb.BoletimSismico()
        bsb_list = b.getAll()
        geojson_l = b.getAllGeoJson(20)
        return dict(page='index',
                    filterForm = "",
                    events = event_list[:8],
                    bsb = bsb_list[:20],
                    cycle = cycle,
                    geojson = geojson,
                    geojson_l = geojson_l,
                    evt_png = url("/images/event.png"),
                    last_evt_png = url("/images/event.png"),
                    )
    # """Handle the front-page."""
    # return dict(page='index')

    @expose('portal.templates.waveform')
    def waveform(self):
        """Handle the waveform page."""
        event_list = model.events.Events().getAll()
        return dict(page='waveform', events=event_list)

    @expose('portal.templates.inform')
    def inform(self):
        """Handle the waveform page."""
        return dict(page='inform')

    @expose('portal.templates.download')
    def download(self, *args, **kw):
        # Builds a textual filter description from the submitted download
        # form and re-renders the form with the submitted values.
        import downloadForms as df
        from datetime import datetime
        filter = ""
        dat = {}
        if kw != {}:
            for k, v in kw.iteritems():
                dat[k]=v
                if v != '':
                    if k == "network":
                        filter += " network %s " % v
                    elif k == "station":
                        filter += " station %s " % v
                    elif k == "channel":
                        filter += " channel %s " % v
                    elif k == "onehour":
                        filter += " onehour %s " % v
                    elif k == "type":
                        filter += " type %s " % v
                    elif k == "outfile":
                        filter += " outfile %s " % v
                    # NOTE(review): duplicate "type" branch below -- it is
                    # unreachable (the earlier elif matches first) and appends
                    # " network ", which looks like a copy/paste slip.
                    elif k == "type":
                        filter += " network %s " % v
                    elif k == "date_f":
                        filter += " start " + str(datetime.strptime(v, "%d-%m-%Y %H:%M"))
                    elif k == "date_t":
                        filter += " end " + str(datetime.strptime(v, "%d-%m-%Y %H:%M"))
        print dat
        f = df.DownloadForm().req()
        """Handle the waveform page."""
        # event_list = model.events.Events().getAll()
        return dict(page='download',
                    downloadForm = f,
                    data = dat,
                    )

    @expose('portal.templates.data_availability')
    def data_availability(self, **kw):
        # Lists every net/sta/loc/chan epoch from the seishub inventory,
        # marking station/location boundaries for the template and showing
        # only epochs starting in 2010 or later.
        d = None
        """return net_station_loc_chan"""
        try:
            con = psycopg2.connect(host="10.110.0.134", database='seishub', user='seishub', password="seishub")
            cursor = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
            query = """
            select network_id, station_id, location_id, channel_id, start_datetime, end_datetime
            from "/seismology/station"
            group by network_id, station_id, location_id, channel_id, start_datetime, end_datetime
            order by network_id, station_id, location_id, channel_id desc, start_datetime, end_datetime
            """
            cursor.execute(query)
            rows = cursor.fetchall()
            l = []
            old_sta = None
            old_loc = None
            break_sta = False
            break_loc = False
            for r in rows:
                # Flag rows where the station (and, within a station, the
                # location) changes so the template can draw separators.
                if old_sta != r['station_id']:
                    old_sta = r['station_id']
                    break_sta = True
                    old_loc = r['location_id']
                    break_loc = True
                else:
                    break_sta = False
                    if old_loc != r['location_id']:
                        old_loc = r['location_id']
                        break_loc = True
                    else:
                        break_loc = False
                # Open-ended or future epochs are clamped to "now".
                if r["end_datetime"] == None:
                    r["end_datetime"] = datetime.now()
                if r["end_datetime"] > datetime.now():
                    r["end_datetime"] = datetime.now()
                if r["start_datetime"] >= datetime(2010,01,01):
                    show = True
                else:
                    show= False
                l.append(dict(
                    net=r['network_id'],
                    sta=r['station_id'],
                    loc=r['location_id'],
                    cha=r['channel_id'],
                    t0=r['start_datetime'],
                    tf=r['end_datetime'],
                    break_sta = break_sta,
                    break_loc = break_loc,
                    show = show
                    ))
            d = l
            con.close()
        except psycopg2.DatabaseError, e:
            print 'Error %s' % e
            pass
        finally:
            # NOTE(review): if psycopg2.connect itself raises, `con` was never
            # bound and this `if con:` raises NameError (getGaps below avoids
            # that by initialising con = None first).
            if con:
                con.close()
        return dict(page='data_availability', args=kw, channels=d)

    @expose('portal.templates.availability_1990')
    def availability_1990(self, **kw):
        # NOTE(review): near-duplicate of data_availability, differing only in
        # the "show" cut (epochs starting before 2010) and no end clamping.
        d = None
        """return net_station_loc_chan"""
        try:
            con = psycopg2.connect(host="10.110.0.134", database='seishub', user='seishub', password="seishub")
            cursor = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
            query = """
            select network_id, station_id, location_id, channel_id, start_datetime, end_datetime
            from "/seismology/station"
            group by network_id, station_id, location_id, channel_id, start_datetime, end_datetime
            order by network_id, station_id, location_id, channel_id desc, start_datetime, end_datetime
            """
            cursor.execute(query)
            rows = cursor.fetchall()
            l = []
            old_sta = None
            old_loc = None
            break_sta = False
            break_loc = False
            for r in rows:
                if old_sta != r['station_id']:
                    old_sta = r['station_id']
                    break_sta = True
                    old_loc = r['location_id']
                    break_loc = True
                else:
                    break_sta = False
                    if old_loc != r['location_id']:
                        old_loc = r['location_id']
                        break_loc = True
                    else:
                        break_loc = False
                if r["start_datetime"] < datetime(2010,01,01):
                    show = True
                else:
                    show= False
                l.append(dict(
                    net=r['network_id'],
                    sta=r['station_id'],
                    loc=r['location_id'],
                    cha=r['channel_id'],
                    t0=r['start_datetime'],
                    tf=r['end_datetime'],
                    break_sta = break_sta,
                    break_loc = break_loc,
                    show = show
                    ))
            d = l
            con.close()
        except psycopg2.DatabaseError, e:
            print 'Error %s' % e
            pass
        finally:
            # NOTE(review): same unbound-`con` hazard as data_availability.
            if con:
                con.close()
        return dict(page='data_availability', args=kw, channels=d, cycle=cycle)

    @expose('portal.templates.availability_2010')
    def availability_2010(self, **kw):
        # NOTE(review): near-duplicate of data_availability; "show" is based
        # on the (clamped) end_datetime being 2010 or later.
        d = None
        """return net_station_loc_chan"""
        try:
            con = psycopg2.connect(host="10.110.0.134", database='seishub', user='seishub', password="seishub")
            cursor = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
            query = """
            select network_id, station_id, location_id, channel_id, start_datetime, end_datetime
            from "/seismology/station"
            group by network_id, station_id, location_id, channel_id, start_datetime, end_datetime
            order by network_id, station_id, location_id, channel_id desc, start_datetime, end_datetime
            """
            cursor.execute(query)
            rows = cursor.fetchall()
            l = []
            old_sta = None
            old_loc = None
            break_sta = False
            break_loc = False
            for r in rows:
                if old_sta != r['station_id']:
                    old_sta = r['station_id']
                    break_sta = True
                    old_loc = r['location_id']
                    break_loc = True
                else:
                    break_sta = False
                    if old_loc != r['location_id']:
                        old_loc = r['location_id']
                        break_loc = True
                    else:
                        break_loc = False
                if r["end_datetime"] == None:
                    r["end_datetime"] = datetime.now()
                if r["end_datetime"] > datetime.now():
                    r["end_datetime"] = datetime.now()
                if r["end_datetime"] >= datetime(2010,01,01):
                    show = True
                else:
                    show= False
                l.append(dict(
                    net=r['network_id'],
                    sta=r['station_id'],
                    loc=r['location_id'],
                    cha=r['channel_id'],
                    t0=r['start_datetime'],
                    tf=r['end_datetime'],
                    break_sta = break_sta,
                    break_loc = break_loc,
                    show = show
                    ))
            d = l
            con.close()
        except psycopg2.DatabaseError, e:
            print 'Error %s' % e
            pass
        finally:
            # NOTE(review): same unbound-`con` hazard as data_availability.
            if con:
                con.close()
        return dict(page='data_availability', args=kw, channels=d, cycle=cycle)

    #@expose('portal.templates.data')
    @expose('json')
    def getStations(self, **kw):
        """This method showcases how you can use the same controller for a data page and a display page"""
        # Fetches the BL-network station list from the seishub REST service.
        # NOTE: the local name `json` shadows the module-level `import json`.
        seishub_stations = "http://seishub.iag.usp.br/seismology/station/getList?format=json&network_id=BL"
        req = urllib2.Request(seishub_stations)
        opener = urllib2.build_opener()
        f = opener.open(req)
        json = loads(f.read())
        #return dict(params=kw)
        return dict(stations=dict(args=kw, json=json))

    #@expose('portal.templates.data')
    @expose('json')
    def getDaps_unused_placeholder(self):
        pass

    @expose('json')
    def getGaps(self, **kw):
        # Monthly data-availability percentages for one net/sta/loc, as a
        # JSON object mapping epoch seconds -> percent.
        #gaps("2010-01-01T00:00:00Z", "2013-12-31T00:00:00Z", 1, "%s"%sta, "HHZ")
        j = None
        con = None
        try:
            t0 = datetime.strptime(kw["t0"], '%Y-%m-%dT%H:%M:%SZ')
            tf = datetime.strptime(kw["tf"], '%Y-%m-%dT%H:%M:%SZ')
            d = int(kw["d"])
            #delta = timedelta(days=)
            net = kw["n"]
            sta = kw["s"]
            loc = kw["l"]
            dt = kw["dt"]
            #channel = kw["c"]
            con = psycopg2.connect(host="10.110.0.134", database='seishub', user='seishub', password="seishub")
            cursor = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
            query = """
            select t0, avg(percent) "percent"
            from mv_gaps_monthly
            where net = '%s'
            and sta = '%s'
            and loc = '%s'
            and t0 >= '%s'
            and tf <= '%s'
            group by net, sta, loc, t0
            """%(net, sta, loc, t0, tf)
            #print query
            cursor.execute(query)
            #print json.dumps(cursor.fetchall(), default=date_handler)
            rows = cursor.fetchall()
            l = []
            for r in rows:
                l.append([calendar.timegm(r['t0'].timetuple()),r['percent']])
            #print l
            #print json.dumps(dict(l))
            j = json.dumps(dict(l))
            #print l
            #file = 'month_hour_%s.%s.js'%(station,channel)
            #f = open(file,'w')
            #print >> f, j
            con.close()
        except psycopg2.DatabaseError, e:
            print 'Error %s' % e
            pass
        finally:
            if con:
                con.close()
        #sta = station["code"]
        """This method showcases how you can use the same controller for a data page and a display page"""
        #seishub_stations = "http://seishub.iag.usp.br/seismology/station/getList?format=json&network_id=BL"
        #req = urllib2.Request(seishub_stations)
        #opener = urllib2.build_opener()
        #f = opener.open(req)
        #json = loads(f.read())
        #return dict(params=kw)
        return dict(gaps=j)

    #@expose('portal.templates.data')
    @expose('json')
    def getGapsDaily(self, **kw):
        # NOTE(review): despite the name, this queries mv_gaps_weekly (the
        # daily query is commented out below) -- confirm which is intended.
        #gaps("2010-01-01T00:00:00Z", "2013-12-31T00:00:00Z", 1, "%s"%sta, "HHZ")
        j = None
        con = None
        try:
            t0 = datetime.strptime(kw["t0"], '%Y-%m-%dT%H:%M:%SZ')
            tf = datetime.strptime(kw["tf"], '%Y-%m-%dT%H:%M:%SZ')
            d = int(kw["d"])
            #delta = timedelta(days=)
            dt = kw["dt"]
            net = kw["n"]
            sta = kw["s"]
            loc = kw["l"]
            #channel = kw["c"]
            if net == "": net = "BL"
            con = psycopg2.connect(host="10.110.0.134", database='seishub', user='seishub', password="seishub")
            cursor = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
            # query = """
            # select t0, percent from get_gaps( '%s', '%s',
            # '1 day'::interval,
            # '%s'::text, '%s'::text )
            # """%(t0, tf, station, channel)
            #
            # query = """
            # select t0, avg(percent) "percent"
            # from mv_gaps_daily
            # where net = '%s'
            # and sta = '%s'
            # and loc = '%s'
            # and cha = '%s'
            # and t0 > '%s'
            # and tf < '%s'
            # group by net, sta, loc, t0
            # """%(network, station, location, channel, t0, tf)
            query = """
            select t0, avg(percent) "percent"
            from mv_gaps_weekly
            where net = '%s'
            and sta = '%s'
            and loc = '%s'
            and t0 >= '%s'
            and tf <= '%s'
            group by net, sta, loc, t0
            """%(net, sta, loc, t0, tf)
            #print query
            cursor.execute(query)
            #print json.dumps(cursor.fetchall(), default=date_handler)
            rows = cursor.fetchall()
            l = []
            for r in rows:
                l.append([calendar.timegm(r['t0'].timetuple()),r['percent']])
            # l.append(dict(date=calendar.timegm(r['t0'].timetuple()),
            # gaps = r['percent']))
            #print l
            #print json.dumps(dict(l))
            j = json.dumps(dict(l))
            #j = json.dumps(l)
            #print j
            #file = 'month_hour_%s.%s.js'%(station,channel)
            #f = open(file,'w')
            #print >> f, j
            con.close()
        except psycopg2.DatabaseError, e:
            print 'Error %s' % e
            pass
        finally:
            if con:
                con.close()
        #sta = station["code"]
        """This method showcases how you can use the same controller for a data page and a display page"""
        #seishub_stations = "http://seishub.iag.usp.br/seismology/station/getList?format=json&network_id=BL"
        #req = urllib2.Request(seishub_stations)
        #opener = urllib2.build_opener()
        #f = opener.open(req)
        #json = loads(f.read())
        #return dict(params=kw)
        return dict(gaps=j)

    @expose('portal.templates.google')
    def google(self):
        """Handle google data backend."""
        return dict(page='google')

    @expose('portal.templates.about')
    def about(self, *args, **kw):
        """Handle the about page."""
        return dict(page='about')

    @expose('portal.templates.disclaimer')
    def disclaimer(self, *args, **kw):
        """Handle the disclaimer page."""
        return dict(page='disclaimer')

    @expose('portal.templates.environ')
    def environ(self):
        """This method showcases TG's access to the wsgi environment."""
        return dict(environment=request.environ)

    @expose('portal.templates.data')
    @expose('json')
    def data(self, **kw):
        """This method showcases how you can use the same controller for a data page and a display page"""
        # NOTE: the local name `json` shadows the module-level `import json`.
        seishub_stations = "http://10.110.0.134/seismology/station/getList?format=json&network_id=BL"
        req = urllib2.Request(seishub_stations)
        opener = urllib2.build_opener()
        f = opener.open(req)
        json = loads(f.read())
        #return dict(params=kw)
        return dict(params=dict(args=kw, json=json))

    @expose('portal.templates.authentication')
    def auth(self):
        """Display some information about auth* on this application."""
        return dict(page='auth')

    @expose('portal.templates.index')
    @require(predicates.has_permission('manage', msg=l_('Permitido apenas para funcionários')))
    def manage_permission_only(self, **kw):
        """Illustrate how a page for managers only works."""
        return dict(page='managers stuff')

    @expose('portal.templates.index')
    @require(predicates.is_user('editor', msg=l_('Permitido apenas para editor')))
    def editor_user_only(self, **kw):
        """Illustrate how a page exclusive for the editor works."""
        return dict(page='editor stuff')

    @expose('portal.templates.login')
    def login(self, came_from=url('/')):
        """Start the user login."""
        login_counter = request.environ['repoze.who.logins']
        if login_counter > 0:
            flash(_('Usuario|Senha invalidos'), 'warning')
        return dict(page='login', login_counter=str(login_counter),
                    came_from=came_from)

    @expose()
    def post_login(self, came_from=url('/')):
        """
        Redirect the user to the initially requested page on successful
        authentication or redirect her back to the login page if login failed.
        """
        if not request.identity:
            login_counter = request.environ['repoze.who.logins'] + 1
            redirect(url('/login'),
                     params=dict(came_from=came_from, __logins=login_counter))
        userid = request.identity['repoze.who.userid']
        flash(_('Bem vindo novamente, %s!') % userid)
        redirect(came_from)

    @expose()
    def post_logout(self, came_from=url('/')):
        """
        Redirect the user to the initially requested page on logout and say
        goodbye as well.
        """
        flash(_('Esperamos ve-lo novamente em breve!'))
        redirect(came_from)

    @expose('portal.templates.m_world')
    def m_world(self):
        # Mobile view: latest 10 world events.
        e = model.events.Events()
        event_list = e.getAll()
        geojson = e.getAllGeoJson(10)
        return dict(page='index',
                    filterForm = "",
                    events = event_list[:10],
                    cycle = cycle,
                    geojson = geojson,
                    evt_png = url("/images/event.png"),
                    last_evt_png = url("/images/event.png"),
                    )

    @expose('portal.templates.m_bsb')
    def m_bsb(self):
        # Mobile view: latest 30 bulletin events.
        b = model.bsb.BoletimSismico()
        bsb_list = b.getAll()
        geojson_bsb = b.getAllGeoJson(30)
        return dict(page='m_bsb',
                    filterForm = "",
                    bsb = bsb_list[:30],
                    cycle = cycle,
                    geojson_bsb = geojson_bsb,
                    evt_png = url("/images/event.png"),
                    last_evt_png = url("/images/event.png"),
                    )
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,621 | preinh/seisPortal | refs/heads/master | /portal/controllers/downloadForms.py |
import tw2.core as twc
import tw2.forms as twf
import tw2.dynforms as twd
import tw2.jqplugins.ui as jqui
class DownloadForm(twf.Form):
    # Waveform download request form (tw2 declarative widget tree):
    # network/station/channel text fields, a linked pair of jQuery-UI
    # datetime pickers, an "onehour" checkbox and an output-format
    # selector, all laid out as a single table row.
    submit = None   # suppress tw2's default submit button (we add our own)
    action = None   # form posts back to the current URL
    class child(twf.RowLayout):
        repetition = 1
        class network(twf.ListLayout):
            id=None
            network = twf.TextField(size=2)
        class station(twf.ListLayout):
            id=None
            station = twf.TextField(size=5)
        class channel(twf.ListLayout):
            id=None
            channel = twf.TextField(size=3)
        class timeWindow(twf.TableLayout):
            id = None
            #repetitions = 1
            # "From" picker: the JS handlers keep the interval ordered by
            # clamping/initialising the "to" picker when this one changes.
            date_f = jqui.widgets.DateTimePickerWidget(id="date_f",
                label="De",
                size=12,
                options={
                    'dateFormat':'dd-mm-yy',
                },
                events={
                    'onClose': """
                    function(dateText, inst) {
                        if ($('#date_t').val() != '') {
                            var testStartDate = $('#date_f').datetimepicker('getDate');
                            var portalndDate = $('#date_t').datetimepicker('getDate');
                            if (testStartDate > portalndDate)
                                $('#date_t').datetimepicker('setDate', testStartDate);
                        }
                        else {
                            $('#date_t').val(dateText);
                        }
                    }"""
                    ,
                    'onSelect': """
                    function (selectedDateTime){
                        $('#date_t').datetimepicker('option',
                            'minDate',
                            $('#date_f').datetimepicker('getDate')
                        );
                    }"""
                    ,
                }
            )
            # "To" picker: mirror image of date_f (clamps the "from" side).
            date_t = jqui.widgets.DateTimePickerWidget(id="date_t",
                label="Ate",
                size=12,
                options={
                    'dateFormat':'dd-mm-yy',
                },
                events={
                    'onClose': """
                    function(dateText, inst) {
                        if ($('#date_f').val() != '') {
                            var testStartDate = $('#date_f').datetimepicker('getDate');
                            var portalndDate = $('#date_t').datetimepicker('getDate');
                            if (testStartDate > portalndDate)
                                $('#date_f').datetimepicker('setDate', portalndDate);
                        }
                        else {
                            $('#date_f').val(dateText);
                        }
                    }"""
                    ,
                    'onSelect': """
                    function (selectedDateTime){
                        $('#date_f').datetimepicker('option',
                            'maxDate',
                            $('#date_t').datetimepicker('getDate')
                        );
                    }
                    """
                }
            )
        class onehour(twf.ListLayout):
            id=None
            # NOTE(review): field is named "onhour" inside the "onehour"
            # container -- looks like a typo; the download() controller keys
            # on "onehour".  Confirm before renaming.
            onhour = twf.CheckBox()
        class type(twf.ListLayout):
            id=None
            type=twf.SingleSelectField(options=['SAC','MSEED'])
            type.value = 'SAC'
        class do(twf.SubmitButton):
            value="Download"
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,622 | preinh/seisPortal | refs/heads/master | /portal/controllers/stations.py | #from tg import expose, request
from tg import expose, flash, require, url, lurl, request, redirect
from tg.i18n import ugettext as _, lazy_ugettext as l_
from portal.lib.base import BaseController
from portal import model
from itertools import cycle
import datetime
import tw2.core as twc
import tw2.forms as twf
import tw2.dynforms as twd
import tw2.jqplugins.ui as jqui
import stationsForms as sf
__all__ = ['StationsController']
class Station_Page(twc.Page):
    # tw2 Page wrapper that embeds the station filter form as its child
    # widget; `title` is the page title used by the widget framework.
    title = "page"
    child = sf.StationFilterForm()
class StationsController(BaseController):
    """Controller for the station list and station detail pages."""

    #_s = model.stations.Stations()

    @expose('portal.templates.stations')
    def index(self, *args, **kw):
        """Handle the stations page, applying any submitted filters.

        Each non-empty form field is translated into a SQL WHERE fragment
        that the Stations model appends to its inventory query.
        """
        filter = ""
        dat = {}
        if kw != {}:
            for k, v in kw.iteritems():
                dat[k] = v
                if v != '':
                    if k == "cod":
                        filter += " AND lower(station.m_code) LIKE lower('%s') " % ("%" + str(v) + "%")
                    elif k == "loc":
                        filter += " AND lower(station.m_description) LIKE lower('%s') " % ("%" + str(v) + "%")
                    elif k == "dep_f":
                        filter += " AND station.m_elevation >= %f " % (float(v))
                    elif k == "dep_t":
                        filter += " AND station.m_elevation <= %f " % (float(v))
                    elif k == "lat_f":
                        # Fixed: the lower latitude bound previously used '<='
                        # (copy/paste bug), making "latitude from" act as "to".
                        filter += " AND station.m_latitude >= %f " % (float(v))
                    elif k == "lat_t":
                        filter += " AND station.m_latitude <= %f " % (float(v))
                    elif k == "lon_f":
                        # Fixed: same copy/paste bug for the longitude lower bound.
                        filter += " AND station.m_longitude >= %f " % (float(v))
                    elif k == "lon_t":
                        filter += " AND station.m_longitude <= %f " % (float(v))
        _s = model.stations.Stations()
        stations_list = _s.getAll(filter=filter)
        json = _s.getAllJson()
        f = sf.StationFilterForm().req()
        return dict(page = 'stations',
                    filterForm = f,
                    data = dat,
                    stations = stations_list,
                    cycle = cycle,
                    json = json,
                    map_symbol = url("/images")+"/station.png",
                    )

    @expose('portal.templates.stations')
    def stations(self):
        """Handle the unfiltered stations page."""
        #s = model.stations.Stations()
        f = sf.StationFilterForm().req()
        _s = model.stations.Stations()
        stations_list = _s.getAll()
        json = _s.getAllJson()
        return dict(page='stations',
                    filterForm = f,
                    data = {},
                    stations = stations_list,
                    cycle = cycle,
                    json = json,
                    map_symbol = url("/images")+"/station.png",
                    )

    @expose('portal.templates.station')
    def _default(self, came_from=url('/')):
        """Catch-all: treat the unmatched path segment as a station id."""
        id = came_from
        _s = model.stations.Stations()
        #_s.getAll();
        station_details = _s.getDetails(id)
        return dict(page='station',
                    d = station_details)
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,623 | preinh/seisPortal | refs/heads/master | /portal/lib/base.py | # -*- coding: utf-8 -*-
"""The base Controller API."""
from tg import TGController, tmpl_context
from tg.render import render
from tg import request
from tg.i18n import ugettext as _, ungettext
import portal.model as model
from tg import url
import tw2.core.core
#import tw2.jquery
__all__ = ['BaseController']
# dev and prod MERGED!!! yes!
class BaseController(TGController):
    """
    Base class for the controllers in the application.

    Your web application should have one of these. The root of
    your application is used to compute URLs used by your app.
    """

    def __call__(self, environ, start_response):
        """Invoke the Controller.

        Attaches the repoze.who identity (if any) to the request and the
        template context before dispatch, then rewrites every queued
        ToscaWidgets2 resource link through tg's ``url()``.
        """
        # TGController.__call__ dispatches to the Controller method
        # the request is routed to. This routing information is
        # available in environ['pylons.routes_dict']
        request.identity = request.environ.get('repoze.who.identity')
        tmpl_context.identity = request.identity
        #return TGController.__call__(self, environ, start_response)
        stream = TGController.__call__(self, environ, start_response)

        # Disable the injection of tw2.jquery
        #offending_link = tw2.jquery.jquery_js.req().link
        # Pass each tw2 resource link through url() (presumably so links
        # respect the application's mount point -- confirm) and store the
        # rewritten list back into the tw2 request-local registry.
        local = tw2.core.core.request_local()
        res = []
        for r in local.get('resources', list()):
            #if r.link != offending_link:
            r.link = url(r.link)
            res.append(r)
        local['resources'] = res
        return stream
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,624 | preinh/seisPortal | refs/heads/master | /portal/controllers/bsb.py | #from tg import expose, request
from tg import expose, flash, require, url, lurl, request, redirect
from tg.i18n import ugettext as _, lazy_ugettext as l_
from portal.lib.base import BaseController
from portal import model
from itertools import cycle
__all__ = ['BsbController']
import tw2.core as twc
#import tw2.forms as twf
#import tw2.dynforms as twd
#import tw2.jqplugins.ui as jqui
from datetime import datetime
#class Event_Page(twc.Page):
# title = "page"
# child = ef.EventFilterForm()
class BsbController(BaseController):
    """Controller for the BSB seismic-bulletin event pages."""

    @expose('portal.templates.events')
    def index(self, *args, **kw):
        """Render the events page, applying filter values posted in ``kw``.

        Numeric fields become ``AND`` clauses of an SQL filter fragment; the
        date fields are parsed and stored on the model object itself
        (``e.b`` / ``e.e``) before ``getAll`` is called.
        """
        e = model.bsb.BoletimSismico()
        filter = ""
        dat = {}
        for k, v in kw.items():
            dat[k] = v
            if v == '':
                continue
            if k == "mag_f":
                filter += " AND m_magnitude_value >= %f " % (float(v))
            elif k == "mag_t":
                filter += " AND m_magnitude_value <= %f " % (float(v))
            elif k == "dep_f":
                filter += " AND m_depth_value >= %f " % (float(v))
            elif k == "dep_t":
                filter += " AND m_depth_value <= %f " % (float(v))
            elif k == "lat_f":
                # Fixed: the lower latitude bound must use >= (was <=).
                filter += " AND m_latitude_value >= %f " % (float(v))
            elif k == "lat_t":
                filter += " AND m_latitude_value <= %f " % (float(v))
            elif k == "lon_f":
                # Fixed: the lower longitude bound must use >= (was <=).
                filter += " AND m_longitude_value >= %f " % (float(v))
            elif k == "lon_t":
                filter += " AND m_longitude_value <= %f " % (float(v))
            elif k == "date_f":
                e.b = datetime.strptime(v, "%d-%m-%Y %H:%M")
            elif k == "date_t":
                e.e = datetime.strptime(v, "%d-%m-%Y %H:%M")

        event_list = e.getAll(filter=filter)
        geojson = e.getAllGeoJson()
        geojson_l = e.getLastGeoJson()
        f = ''
        return dict(page='events',
                    filterForm=f,
                    data=dat,
                    events=event_list,
                    cycle=cycle,
                    geojson=geojson,
                    geojson_l=geojson_l,
                    evt_png=url("/images/event.png"),
                    last_evt_png=url("/images/star2.png"),
                    )

    @expose('portal.templates.events')
    def events(self, *args, **kw):
        """Render the events page with no filter applied."""
        e = model.bsb.BoletimSismico()
        event_list = e.getAll()
        geojson = e.getAllGeoJson()
        geojson_l = e.getLastGeoJson()
        f = ''
        #f = ef.EventFilterForm().req()
        return dict(page='events',
                    filterForm=f,
                    events=event_list,
                    cycle=cycle,
                    geojson=geojson,
                    geojson_l=geojson_l,
                    evt_png=url("/images/event.png"),
                    last_evt_png=url("/images/star2.png"),
                    )

    @expose('portal.templates.event')
    def _default(self, came_from=url('/'), *args, **kw):
        """Render the detail page for a single event; ``came_from`` is the id.

        NOTE(review): details are fetched via ``model.events.Events`` rather
        than ``model.bsb`` -- confirm this cross-module lookup is intentional.
        """
        id = came_from
        event_details = model.events.Events().getDetails(id)
        f = ''
        return dict(page='event',
                    filterForm=f,
                    d=event_details)
| {"/portal/controllers/events.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/stations.py": ["/portal/lib/base.py", "/portal/__init__.py"], "/portal/controllers/bsb.py": ["/portal/lib/base.py", "/portal/__init__.py"]} |
75,625 | michoy/pineapple | refs/heads/master | /overview/tests.py | from django.test import Client
from django.test import TestCase
import django
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pineapple.settings")
from django.contrib.auth.models import User
from exercise import populate
class ServerTestCase(TestCase):
    """Integration tests for /overview/ as student, lecturer and anonymous."""

    def setUp(self):
        # Must be run, else crash
        django.setup()
        self.client = Client()

        # A lecturer in the 'Lecturer' group, with an empty course
        # collection, who administers the test course.
        lecturer = User.objects.create_user(
            username='theTeach', email='teach@me.com', password='schooled')
        lecturer.groups.add(populate.add_user_group('Lecturer'))
        populate.add_coursecollection(student='theTeach', course_list=[])

        # The course that both test users will select later.
        populate.add_course(
            name='TDTT3st',
            full_name='L33t3st C0urse',
            admin_list=['theTeach'],
            material_list=[],
            description='For testing purposes'
        )

        # A student in the 'Student' group, enrolled in the test course.
        student = User.objects.create_user(
            username='theMan', email='the@man.no', password='thePa$$word')
        student.groups.add(populate.add_user_group('Student'))
        populate.add_coursecollection(student='theMan', course_list=['TDTT3st'])

    def test_overview_student(self):
        self.client.login(username='theMan', password='thePa$$word')
        # The overview page loads for an authenticated student.
        response = self.client.get('/overview/')
        self.assertEqual(200, response.status_code)
        # Selecting a course redirects to that course's page.
        response = self.client.post('/overview/', {'course-select': 'TDTT3st'})
        self.assertEqual(302, response.status_code)
        self.assertEqual('/course/TDTT3st/', response.url)

    def test_overview_lecturer(self):
        self.client.login(username='theTeach', password='schooled')
        # The overview page loads for an authenticated lecturer.
        response = self.client.get('/overview/')
        self.assertEqual(200, response.status_code)
        # Selecting a course redirects to that course's page.
        response = self.client.post('/overview/', {'course-select': 'TDTT3st'})
        self.assertEqual(302, response.status_code)
        self.assertEqual('/course/TDTT3st/', response.url)

    def test_overview_redirect(self):
        # An anonymous visitor is bounced to the login page with ?next=.
        response = self.client.get('/overview/')
        self.assertEqual(302, response.status_code)
        self.assertEqual('/login/?next=/overview/', response.url)
| {"/exercise/views.py": ["/exercise/forms.py", "/exercise/models.py", "/botTester/AssistantBot.py", "/course/views.py"], "/course/forms.py": ["/exercise/models.py"], "/pineapple/urls.py": ["/overview/views.py", "/exercise/views.py", "/about/views.py", "/course/views.py"], "/about/views.py": ["/exercise/populate.py"], "/overview/views.py": ["/course/views.py"], "/about/tests.py": ["/exercise/populate.py"], "/course/views.py": ["/exercise/models.py", "/course/forms.py"], "/exercise/tests.py": ["/exercise/models.py"], "/botTester/AssistantBot.py": ["/exercise/populate.py", "/exercise/models.py"], "/exercise/populate.py": ["/exercise/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.