index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
19,028,830
|
trubru89/Shitman
|
refs/heads/main
|
/shitman/discardpile.py
|
class DiscardPile:
    """Holds cards discarded during play until someone picks them up."""

    def __init__(self):
        # Discarded cards (or groups of cards), oldest first.
        self.discard_pile = []

    def add_to_discard_pile(self, cards):
        """Append *cards* (a card or a collection of cards) to the pile."""
        self.discard_pile.append(cards)

    def pick_discard_pile(self):
        """Hand over a copy of the whole pile and empty it."""
        picked_up = list(self.discard_pile)
        self.discard_pile = []
        return picked_up
|
{"/shitman/shitman_the_game.py": ["/shitman/gameboard.py"], "/shitman/carddeck.py": ["/shitman/card.py"], "/shitman/cardpile.py": ["/shitman/card.py"], "/shitman/shitmanplayer.py": ["/shitman/cardhand.py"], "/shitman/gameboard.py": ["/shitman/carddeck.py", "/shitman/shitmanplayer.py", "/shitman/aishitman.py", "/shitman/cardpile.py", "/shitman/discardpile.py"], "/shitman/aishitman.py": ["/shitman/cardhand.py"]}
|
19,028,831
|
trubru89/Shitman
|
refs/heads/main
|
/shitman/carddeck.py
|
import random
from shitman.card import Card
class CardDeck:
    """A standard 52-card deck built from Card objects."""

    def __init__(self):
        # "Clove" (sic) is the spelling used throughout this project.
        self.suits = ["Heart", "Spade", "Clove", "Diamond"]
        self.values = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
        self.ranks = ["two", "three", "four", "five", "six", "seven",
                      "eight", "nine", "ten", "jack", "queen", "king", "ace"]
        self.deck = []
        self.drawn_card = None

    def compile_deck(self):
        """Build all 52 cards: every suit paired with every value/rank."""
        self.deck = [
            Card(suit, value, rank)
            for suit in self.suits
            for value, rank in zip(self.values, self.ranks)
        ]

    def shuffle_deck(self):
        """Shuffle the deck in place."""
        random.shuffle(self.deck)

    def draw_from_deck(self):
        """Remove and return the top card, or False when the deck is empty."""
        if not self.deck:
            return False
        return self.deck.pop(0)

    def get_deck(self):
        """Return a shallow copy of the remaining deck."""
        return list(self.deck)

    def show_deck(self):
        """Print the raw deck list (debug helper)."""
        print(self.deck)

    def deck_is_not_depleted(self):
        """Truthy while cards remain (returns the deck list itself)."""
        return self.deck
|
{"/shitman/shitman_the_game.py": ["/shitman/gameboard.py"], "/shitman/carddeck.py": ["/shitman/card.py"], "/shitman/cardpile.py": ["/shitman/card.py"], "/shitman/shitmanplayer.py": ["/shitman/cardhand.py"], "/shitman/gameboard.py": ["/shitman/carddeck.py", "/shitman/shitmanplayer.py", "/shitman/aishitman.py", "/shitman/cardpile.py", "/shitman/discardpile.py"], "/shitman/aishitman.py": ["/shitman/cardhand.py"]}
|
19,028,832
|
trubru89/Shitman
|
refs/heads/main
|
/shitman/cardpile.py
|
from shitman.card import Card
class CardPile:
def __init__(self):
self.card_pile = []
self.empty_card = Card("No suit", 0, "No rank")
self.top_card = self.empty_card
def add_to_card_pile(self, card):
self.card_pile.insert(0, card)
def throw_card_pile(self):
cards_to_throw = self.card_pile
self.card_pile = []
return cards_to_throw
def get_top_card_in_card_pile(self):
if self.card_pile:
self.top_card = self.card_pile[0]
return self.top_card
else:
self.top_card = self.empty_card
return self.top_card
|
{"/shitman/shitman_the_game.py": ["/shitman/gameboard.py"], "/shitman/carddeck.py": ["/shitman/card.py"], "/shitman/cardpile.py": ["/shitman/card.py"], "/shitman/shitmanplayer.py": ["/shitman/cardhand.py"], "/shitman/gameboard.py": ["/shitman/carddeck.py", "/shitman/shitmanplayer.py", "/shitman/aishitman.py", "/shitman/cardpile.py", "/shitman/discardpile.py"], "/shitman/aishitman.py": ["/shitman/cardhand.py"]}
|
19,028,833
|
trubru89/Shitman
|
refs/heads/main
|
/shitman/shitmanplayer.py
|
from shitman.cardhand import CardHand
class Player(CardHand):
    """A human player: a CardHand plus a name read interactively."""

    def __init__(self):
        # NOTE(review): input() at construction time makes this class hard to
        # test non-interactively; the prompt also runs before the CardHand
        # base is initialised — confirm that ordering is intentional.
        self.player_name = input("What is the player name? ")
        super().__init__()

    def get_player_name(self):
        """Return the name entered when the player was created."""
        return self.player_name

    @staticmethod
    def is_real_player():
        # Distinguishes humans from AiPlayer (whose version returns False).
        return True
|
{"/shitman/shitman_the_game.py": ["/shitman/gameboard.py"], "/shitman/carddeck.py": ["/shitman/card.py"], "/shitman/cardpile.py": ["/shitman/card.py"], "/shitman/shitmanplayer.py": ["/shitman/cardhand.py"], "/shitman/gameboard.py": ["/shitman/carddeck.py", "/shitman/shitmanplayer.py", "/shitman/aishitman.py", "/shitman/cardpile.py", "/shitman/discardpile.py"], "/shitman/aishitman.py": ["/shitman/cardhand.py"]}
|
19,028,834
|
trubru89/Shitman
|
refs/heads/main
|
/app.py
|
import os
from flask import Flask
from shitman import shitman_the_game
app = Flask(__name__)


@app.route("/")
def index():
    """Landing page for the game (not yet implemented).

    Bug fix: this view was named ``shitman_the_game``, which shadowed the
    ``shitman.shitman_the_game`` module imported above and made that module
    unreachable in this file. No caller references the view by name, so it
    is renamed.
    """
    # TODO: render the game UI / delegate to shitman.shitman_the_game.
    pass


if __name__ == "__main__":
    # Heroku-style deployment: bind all interfaces on $PORT (default 5000).
    app.run(host='0.0.0.0', port=int(os.getenv('PORT', 5000)))
|
{"/shitman/shitman_the_game.py": ["/shitman/gameboard.py"], "/shitman/carddeck.py": ["/shitman/card.py"], "/shitman/cardpile.py": ["/shitman/card.py"], "/shitman/shitmanplayer.py": ["/shitman/cardhand.py"], "/shitman/gameboard.py": ["/shitman/carddeck.py", "/shitman/shitmanplayer.py", "/shitman/aishitman.py", "/shitman/cardpile.py", "/shitman/discardpile.py"], "/shitman/aishitman.py": ["/shitman/cardhand.py"]}
|
19,028,835
|
trubru89/Shitman
|
refs/heads/main
|
/shitman/gameboard.py
|
from shitman.carddeck import CardDeck
from shitman.shitmanplayer import Player
from shitman.aishitman import AiPlayer
from shitman.cardpile import CardPile
from shitman.discardpile import DiscardPile
class GameBoard:
    """Wires together the deck, the players and the piles for one game."""

    def __init__(self, number_of_players=2, number_of_ai=0):
        self.number_of_real_players = number_of_players
        self.number_of_ai = number_of_ai
        self.game_deck = CardDeck()
        self.game_deck.compile_deck()
        self.existing_players = []
        self.temp_cards = []
        self.card_pile = CardPile()
        self.discard_pile = DiscardPile()

    def set_up_players(self):
        """Create the configured human and AI players; return a copy of them."""
        for _ in range(self.number_of_real_players):
            self.existing_players.append(Player())
        for _ in range(self.number_of_ai):
            self.existing_players.append(AiPlayer())
        return list(self.existing_players)

    def shuffle_board_deck(self):
        """Shuffle the game deck (thin wrapper over CardDeck.shuffle_deck)."""
        self.game_deck.shuffle_deck()

    def _draw_three(self):
        # Shared implementation of the three 3-card deals below.
        # Bug fix: the originals iterated with ``for self.draw_card in
        # range(3)``, leaking the loop index onto the instance as an
        # attribute; a local throwaway variable is used instead.
        return [self.game_deck.draw_from_deck() for _ in range(3)]

    def get_player_start_hand(self):
        """Deal the 3-card starting hand."""
        return self._draw_three()

    def get_player_turndown(self):
        """Deal the 3 face-down table cards."""
        return self._draw_three()

    def get_player_turnup(self):
        """Deal the 3 face-up table cards."""
        return self._draw_three()

    def draw_from_game_deck(self):
        """Draw one card (False when the deck is empty)."""
        return self.game_deck.draw_from_deck()

    def game_deck_is_not_depleted(self):
        """Truthy while the deck still has cards."""
        return self.game_deck.deck_is_not_depleted()

    def add_to_card_pile(self, card):
        """Place *card* on top of the active pile."""
        self.card_pile.add_to_card_pile(card)

    def discard_card_pile(self):
        """Move the whole active pile onto the discard pile."""
        self.discard_pile.add_to_discard_pile(self.card_pile.throw_card_pile())

    def top_card_in_card_pile(self):
        """Return the pile's top card (an 'empty' card when the pile is empty)."""
        return self.card_pile.get_top_card_in_card_pile()

    def get_card_pile(self):
        """Take (and clear) the whole active pile."""
        return self.card_pile.throw_card_pile()

    def player_draw_card(self, player):
        """Give *player* a card from the deck if any remain."""
        if self.game_deck.deck_is_not_depleted():
            player.add_card_to_hand(self.game_deck.draw_from_deck())
|
{"/shitman/shitman_the_game.py": ["/shitman/gameboard.py"], "/shitman/carddeck.py": ["/shitman/card.py"], "/shitman/cardpile.py": ["/shitman/card.py"], "/shitman/shitmanplayer.py": ["/shitman/cardhand.py"], "/shitman/gameboard.py": ["/shitman/carddeck.py", "/shitman/shitmanplayer.py", "/shitman/aishitman.py", "/shitman/cardpile.py", "/shitman/discardpile.py"], "/shitman/aishitman.py": ["/shitman/cardhand.py"]}
|
19,028,836
|
trubru89/Shitman
|
refs/heads/main
|
/Tests/test_shitman.py
|
import unittest
from shitman import carddeck
from shitman import card
from shitman import gameboard
from shitman import cardhand
# Card deck tests
def test_card_deck_size():
    """A freshly compiled deck contains exactly 52 cards."""
    cd = carddeck.CardDeck()
    cd.compile_deck()
    assert len(cd.get_deck()) == 52, "Created deck does not have 52 cards"
def test_draw_whole_deck():
    """Drawing every card empties the deck; a further draw returns False."""
    cd = carddeck.CardDeck()
    cd.compile_deck()
    # range() is evaluated once, so this draws exactly the initial 52 cards.
    for a_card in range(len(cd.get_deck())):
        draw_card = cd.draw_from_deck()
    assert cd.draw_from_deck() is False
def test_shuffle_deck():
    """Shuffling changes the order of the deck copies.

    NOTE(review): this can fail spuriously in the astronomically unlikely
    case random.shuffle produces the original order.
    """
    cd = carddeck.CardDeck()
    cd.compile_deck()
    new_deck = cd.get_deck()
    cd.shuffle_deck()
    shuffled_deck = cd.get_deck()
    assert new_deck != shuffled_deck
# Card test
def test_card():
    """Card getters return the constructor arguments unchanged."""
    one_card = card.Card("Heart", 2, "two")
    assert one_card.get_suit() == "Heart"
    assert one_card.get_value() == 2
    assert one_card.get_rank() == "two"
# Gameboard tests
def test_set_up_players():
    """A default GameBoard creates two (human) players.

    NOTE(review): Player() calls input() for a name, so this test blocks on
    stdin unless input is piped or mocked — confirm how CI runs it.
    """
    gameboard_one = gameboard.GameBoard()
    players_in_gameboard = gameboard_one.set_up_players()
    assert len(players_in_gameboard) == 2  # Default 2 players
def test_get_player_start_hand():
    """The starting hand is three cards with sane attribute types."""
    gameboard_two = gameboard.GameBoard()
    player_start_hand = gameboard_two.get_player_start_hand()
    assert len(player_start_hand) == 3
    assert type(player_start_hand[0].get_suit()) is str
    assert type(player_start_hand[0].get_value()) is int
    assert type(player_start_hand[0].get_rank()) is str
def test_game_deck_draw_card():
    """Drawing all 52 cards empties the deck and flips the depletion state.

    Bug fix: the original called ``game_deck_is_depleted()``, which does not
    exist on GameBoard (the real method is ``game_deck_is_not_depleted``),
    so the test died with AttributeError. The assertions are restated in
    terms of the real method, which returns a truthy/falsy deck list.
    """
    gameboard_three = gameboard.GameBoard()
    assert bool(gameboard_three.game_deck_is_not_depleted()) is True
    for _ in range(52):
        gameboard_three.draw_from_game_deck()
    assert gameboard_three.draw_from_game_deck() is False
    assert bool(gameboard_three.game_deck_is_not_depleted()) is False
# Cardhand tests.
# Create player hand
# Add a card
# Get smallest card value from hand
# Remove a card until hand is empty
def test_start_hand():
    """Exercise CardHand: deal 3 cards, add 1, find the lowest, empty it."""
    cd = carddeck.CardDeck()
    cd.compile_deck()
    player_hand = []
    for cards_in_start_hand in range(0, 3):
        player_hand.append(cd.draw_from_deck())
    ch = cardhand.CardHand()
    ch.get_start_hand(player_hand)
    assert ch.number_of_cards_in_hand() == 3
    ch.add_card(cd.draw_from_deck())
    assert ch.number_of_cards_in_hand() == 4
    # The deck is unshuffled, so the first cards drawn are the Heart 2..5
    # and the lowest value in hand is 2.
    assert ch.show_lowest_card_in_hand() == 2
    ch.remove_card(0)
    assert ch.number_of_cards_in_hand() == 3
    ch.remove_card(0)
    assert ch.number_of_cards_in_hand() == 2
    ch.remove_card(0)
    assert ch.number_of_cards_in_hand() == 1
    ch.remove_card(0)
    assert ch.number_of_cards_in_hand() == 0
|
{"/shitman/shitman_the_game.py": ["/shitman/gameboard.py"], "/shitman/carddeck.py": ["/shitman/card.py"], "/shitman/cardpile.py": ["/shitman/card.py"], "/shitman/shitmanplayer.py": ["/shitman/cardhand.py"], "/shitman/gameboard.py": ["/shitman/carddeck.py", "/shitman/shitmanplayer.py", "/shitman/aishitman.py", "/shitman/cardpile.py", "/shitman/discardpile.py"], "/shitman/aishitman.py": ["/shitman/cardhand.py"]}
|
19,028,837
|
trubru89/Shitman
|
refs/heads/main
|
/shitman/card.py
|
class Card:
    """A playing card: suit, numeric value and rank name."""

    def __init__(self, suit, value, rank):
        self.suit, self.value, self.rank = suit, value, rank

    def get_suit(self):
        """Return the card's suit, e.g. "Heart"."""
        return self.suit

    def get_value(self):
        """Return the card's numeric value (2-14)."""
        return self.value

    def get_rank(self):
        """Return the card's rank name, e.g. "two"."""
        return self.rank
|
{"/shitman/shitman_the_game.py": ["/shitman/gameboard.py"], "/shitman/carddeck.py": ["/shitman/card.py"], "/shitman/cardpile.py": ["/shitman/card.py"], "/shitman/shitmanplayer.py": ["/shitman/cardhand.py"], "/shitman/gameboard.py": ["/shitman/carddeck.py", "/shitman/shitmanplayer.py", "/shitman/aishitman.py", "/shitman/cardpile.py", "/shitman/discardpile.py"], "/shitman/aishitman.py": ["/shitman/cardhand.py"]}
|
19,028,838
|
trubru89/Shitman
|
refs/heads/main
|
/shitman/aishitman.py
|
from shitman.cardhand import CardHand
class AiPlayer(CardHand):
    """A computer-controlled player: a CardHand with an AI strategy stub."""

    def __init__(self):
        super().__init__()

    def ai_play_card(self, current_card_in_pile_value):
        # TODO: strategy not implemented yet.
        pass

    @staticmethod
    def is_real_player():
        # Distinguishes the AI from Player (whose version returns True).
        return False
|
{"/shitman/shitman_the_game.py": ["/shitman/gameboard.py"], "/shitman/carddeck.py": ["/shitman/card.py"], "/shitman/cardpile.py": ["/shitman/card.py"], "/shitman/shitmanplayer.py": ["/shitman/cardhand.py"], "/shitman/gameboard.py": ["/shitman/carddeck.py", "/shitman/shitmanplayer.py", "/shitman/aishitman.py", "/shitman/cardpile.py", "/shitman/discardpile.py"], "/shitman/aishitman.py": ["/shitman/cardhand.py"]}
|
19,065,833
|
buhanec/starparse
|
refs/heads/main
|
/starparse/config.py
|
"""Basic config."""
import os
__all__ = ('UTF8', 'ORDERED_DICT')
def _bool(env_var: str, default: bool) -> bool:
if env_var not in os.environ:
return default
return os.environ[env_var].upper() in ('1', 'T', 'TRUE')
UTF8 = _bool('STARPARSE_UTF8', False)
ORDERED_DICT = _bool('STARPARSE_ORDERED_DICT', True)
|
{"/starparse/tests/test_ci.py": ["/starparse/__init__.py"], "/starparse/pack.py": ["/starparse/__init__.py"], "/starparse/tests/__init__.py": ["/starparse/__init__.py"], "/starparse/unpack.py": ["/starparse/__init__.py"], "/tests/test_e2e.py": ["/starparse/__init__.py"], "/tests/test_pack_unpack.py": ["/starparse/__init__.py"]}
|
19,065,834
|
buhanec/starparse
|
refs/heads/main
|
/tests/test_e2e.py
|
try:
from importlib.resources import read_binary
except ModuleNotFoundError:
from importlib_resources import read_binary
import itertools
import pytest
from starparse import config, pack, unpack
from tests import ci_players
@pytest.mark.parametrize('player, ordered_dict',
                         itertools.product(ci_players.PLAYERS, (True, False)))
def test_unordered(player, ordered_dict, monkeypatch):
    """End-to-end parity on every CI save: unpack -> pack -> unpack."""
    monkeypatch.setattr(config, 'ORDERED_DICT', ordered_dict)
    file = read_binary(ci_players, f'{player}.player')
    # Unpack
    save_format, entity, flags, offset = unpack.header(file, 0)
    unpacked, limit = unpack.typed(file, offset)
    assert limit == len(file)
    assert save_format == b'SBVJ01'
    # Pack
    packed = pack.header(save_format, entity, flags) + pack.typed(unpacked)
    assert len(packed) == len(file)
    # Re-unpack
    save_format_2, entity_2, flags_2, offset_2 = unpack.header(packed, 0)
    unpacked_2, limit_2 = unpack.typed(packed, offset_2)
    assert save_format == save_format_2
    assert entity == entity_2
    assert flags == flags_2
    assert offset == offset_2
    assert limit == limit_2
    assert unpacked == unpacked_2
    # A byte-identical repack is only guaranteed when key order is preserved.
    if ordered_dict:
        assert packed == file
|
{"/starparse/tests/test_ci.py": ["/starparse/__init__.py"], "/starparse/pack.py": ["/starparse/__init__.py"], "/starparse/tests/__init__.py": ["/starparse/__init__.py"], "/starparse/unpack.py": ["/starparse/__init__.py"], "/tests/test_e2e.py": ["/starparse/__init__.py"], "/tests/test_pack_unpack.py": ["/starparse/__init__.py"]}
|
19,065,835
|
buhanec/starparse
|
refs/heads/main
|
/starparse/pack.py
|
"""Packing functionality."""
from collections import OrderedDict
from functools import wraps
from struct import pack
from typing import Any, Callable, Dict, List, TypeVar, Union, get_type_hints
from starparse import config
T = TypeVar('T')
SBT = Union[str, int, float, list, dict, OrderedDict]
__all__ = ('PackingError', 'uint', 'int_', 'str_', 'bool_', 'none',
'float_', 'type_', 'list_', 'dict_', 'typed', 'header')
class PackingError(Exception):
    """Raised when a value cannot be packed into the Starbound format."""
def check_type(f: Callable[[T], bytearray]) -> Callable[[T], bytearray]:
    """
    Check function argument type.

    Reads the ``value`` annotation of *f* and raises TypeError when the
    runtime argument does not match; ``List[...]``/``Dict[...]`` hints are
    reduced to concrete runtime classes first.

    :param f: function to check param value for
    :return: function with param value checking
    """
    @wraps(f)
    def wrapper(value):
        expecting = get_type_hints(f)['value']
        # Collapse typing generics to classes isinstance() accepts.
        if (expecting.__module__ == 'typing'
                and expecting.__origin__ in (list, List)):
            expecting = list
        elif (expecting.__module__ == 'typing'
                and expecting.__origin__ in (dict, Dict)):
            # In ordered mode only OrderedDict is accepted: a plain dict
            # argument is rejected rather than risking key-order loss.
            if config.ORDERED_DICT:
                expecting = OrderedDict
            else:
                expecting = dict
        if not isinstance(value, expecting):
            raise TypeError(f'{f.__module__}.{f.__name__} expecting '
                            f'{expecting.__name__} but got '
                            f'{type(value).__name__} ({value!r})')
        return f(value)
    return wrapper
@check_type
def uint(value: int) -> bytearray:
    """
    Pack unsigned int to Starbound format (VLQ: 7 data bits per byte,
    continuation bit set on every byte except the last).

    :param value: unsigned int
    :return: bytearray
    :raises PackingError: when int negative
    """
    if value < 0:
        raise PackingError(f'unsigned int cannot be negative: {value}')
    result = bytearray()
    # Last byte: low 7 bits, continuation bit clear.
    result.insert(0, value & 127)
    value >>= 7
    while value:
        # Prepend the remaining 7-bit groups with the continuation bit set.
        result.insert(0, value & 127 | 128)
        value >>= 7
    return result
@check_type
def int_(value: int) -> bytearray:
    """
    Pack int to Starbound format.

    Zig-zag encodes the sign into the lowest bit: a non-negative n maps
    to 2n, a negative n maps to -2n - 1.

    :param value: int
    :return: bytearray
    """
    if value >= 0:
        encoded = value << 1
    else:
        encoded = (-value << 1) - 1
    return uint(encoded)
@check_type
def str_(value: str) -> bytearray:
    """
    Pack string to Starbound format: uint length prefix plus encoded bytes.

    :param value: string
    :return: bytearray
    :raises PackingError: when string encoding error
    """
    # NOTE(review): the prefix is the *character* count. That equals the
    # byte count on the ASCII path, but in the UTF-8 fallback multi-byte
    # characters make the prefix smaller than the payload — verify against
    # the Starbound format (unpack.str_ mirrors the same assumption).
    result = uint(len(value))
    try:
        result.extend(bytearray(value, 'ascii'))
    except UnicodeEncodeError as e:
        if config.UTF8:
            result.extend(bytearray(value, 'utf-8'))
        else:
            raise PackingError(f'string encoding error: {value!r}') from e
    return result
@check_type
def bool_(value: bool) -> bytearray:
    """
    Pack bool to Starbound format (a single 0/1 byte).

    :param value: bool
    :return: bytearray
    """
    return bytearray([value])
# pylint: disable=unused-argument
# noinspection PyUnusedLocal
def none(value: Any = None) -> bytearray:
    """
    Pack None/unset to Starbound format.

    A None value carries no payload, so the result is empty.

    :param value: unused
    :return: bytearray
    """
    del value  # explicitly unused
    return bytearray()
@check_type
def float_(value: float) -> bytearray:
    """
    Pack float to Starbound format (big-endian IEEE-754 double).

    :param value: float
    :return: bytearray
    """
    return bytearray(pack('>d', value))
def type_(value: type) -> bytearray:
    """
    Pack type to Starbound format.

    Type tags are 1..7 in the order NoneType, float, bool, int, str,
    list, dict; OrderedDict shares the dict tag.

    :param value: type
    :return: bytearray
    :raises PackingError: when unsupported value type
    """
    types = dict(zip((type(None), float, bool, int, str, list, dict),
                     range(1, 8)))
    types[OrderedDict] = types[dict]
    try:
        value_type = types[value]
    except KeyError as e:
        raise PackingError(f'unsupported value type: {value}') from e
    return uint(value_type)
@check_type
def list_(value: List[SBT]) -> bytearray:
    """
    Pack list to Starbound format: uint length, then each element packed
    with its type tag.

    :param value: list
    :return: bytearray
    """
    result = uint(len(value))
    for val in value:
        result.extend(typed(val))
    return result
@check_type
def dict_(value: Dict[str, SBT]) -> bytearray:
    """
    Pack dict to Starbound format: uint length, then each key as a string
    followed by its typed value.

    :param value: dict
    :return: bytearray
    """
    result = uint(len(value))
    for key, val in value.items():
        result.extend(str_(key))
        result.extend(typed(val))
    return result
def typed(value: SBT) -> bytearray:
    """
    Pack type and value to Starbound format (type tag, then payload).

    :param value: value
    :return: bytearray
    """
    # Dispatch on the exact runtime type; OrderedDict shares dict's packer.
    handlers: Dict[type, Callable[[Any], bytearray]] = {
        type(None): none,
        bool: bool_,
        int: int_,
        float: float_,
        list: list_,
        dict: dict_,
        OrderedDict: dict_,
        str: str_
    }
    result = type_(type(value))
    result.extend(handlers[type(value)](value))
    return result
def header(save_format: bytes, entity: str, flags: List[int]) -> bytearray:
    """
    Pack Starbound header to Starbound format: the 6-byte save-format
    magic, the entity name string, then the raw flag bytes.

    :param save_format: save format
    :param entity: entity
    :param flags: flags
    :return: bytearray
    """
    return bytearray(save_format) + str_(entity) + bytearray(flags)
|
{"/starparse/tests/test_ci.py": ["/starparse/__init__.py"], "/starparse/pack.py": ["/starparse/__init__.py"], "/starparse/tests/__init__.py": ["/starparse/__init__.py"], "/starparse/unpack.py": ["/starparse/__init__.py"], "/tests/test_e2e.py": ["/starparse/__init__.py"], "/tests/test_pack_unpack.py": ["/starparse/__init__.py"]}
|
19,065,836
|
buhanec/starparse
|
refs/heads/main
|
/starparse/util.py
|
"""Utility functions."""
import logging
from typing import Any
__all__ = ('diff',)
logger = logging.getLogger(__name__)
def diff(a: Any, b: Any, context: str) -> int:
    """
    Diff two values.

    Dispatches to a dict, list or generic scalar comparison; mismatched
    container types fall through to the generic (inequality) check.

    :param a: First value
    :param b: Second value
    :param context: Context placing the values relative to root values
    :return: Count of diffs
    """
    if isinstance(a, dict) and isinstance(b, dict):
        return _dict_diff(a, b, context=context)
    if isinstance(a, list) and isinstance(b, list):
        return _list_diff(a, b, context=context)
    return _generic_diff(a, b, context=context)
def _dict_diff(a: Any, b: Any, context: str = 'base') -> int:
    """Count mismatches between two dicts, recursing into shared keys."""
    only_a = a.keys() - b.keys()
    only_b = b.keys() - a.keys()
    count = 0
    if only_a:
        logger.warning(context)
        logger.warning(' extra keys in a: %s', only_a)
        count += len(only_a)
    if only_b:
        logger.warning(context)
        logger.warning(' extra keys in b: %s', only_b)
        count += len(only_b)
    for key in a.keys() & b.keys():
        count += diff(a[key], b[key], context + '.' + key)
    return count
def _list_diff(a: Any, b: Any, context: str = 'base') -> int:
    """Count mismatches between two lists; a length mismatch short-circuits."""
    if len(a) != len(b):
        logger.warning(context)
        logger.warning(' list len mismatch: %d, %d', len(a), len(b))
        return max(len(a), len(b))
    total = 0
    for index, (item_a, item_b) in enumerate(zip(a, b)):
        total += diff(item_a, item_b, context + '[' + str(index) + ']')
    return total
def _generic_diff(a: Any, b: Any, context: str = 'base') -> int:
    """Count (and log) a mismatch between two scalar values."""
    if a == b:
        return 0
    logger.warning(context)
    logger.warning(' generic mismatch')
    logger.warning(' %s (%s)', a, type(a).__name__)
    logger.warning(' %s (%s)', b, type(b).__name__)
    return 1
|
{"/starparse/tests/test_ci.py": ["/starparse/__init__.py"], "/starparse/pack.py": ["/starparse/__init__.py"], "/starparse/tests/__init__.py": ["/starparse/__init__.py"], "/starparse/unpack.py": ["/starparse/__init__.py"], "/tests/test_e2e.py": ["/starparse/__init__.py"], "/tests/test_pack_unpack.py": ["/starparse/__init__.py"]}
|
19,065,837
|
buhanec/starparse
|
refs/heads/main
|
/starparse/unpack.py
|
"""Unpacking functionality."""
from collections import OrderedDict
from struct import calcsize, unpack_from
from typing import Any, Dict, List, Tuple, Union
from starparse import config
__all__ = ('UnpackingError', 'struct', 'uint', 'int_', 'str_', 'bool_',
'none', 'float_', 'type_', 'list_', 'dict_', 'typed', 'header')
SBT = Union[None, str, int, float, list, dict, OrderedDict]
class UnpackingError(Exception):
    """Raised when a buffer does not match the expected Starbound format."""
def struct(fmt: str, buffer: bytes, offset: int = 0) -> Tuple[Any, int]:
    """
    Unpack struct from Starbound save file.

    :param fmt: struct format
    :param buffer: Starbound save file
    :param offset: position in Starbound save file
    :return: data, new offset
    :raises UnpackingError: when format not as expected
    """
    fields = unpack_from(fmt, buffer, offset)
    new_offset = offset + calcsize(fmt)
    # A tuple made purely of bytes objects is a packed string: join, decode.
    if all(isinstance(field, bytes) for field in fields):
        raw = b''.join(fields)
        try:
            return raw.decode('ascii'), new_offset
        except UnicodeDecodeError as e:
            if not config.UTF8:
                raise UnpackingError(f'ASCII decoding error {fields}') from e
            return raw.decode('utf-8'), new_offset
    # A single scalar field (float, int, ...): return it directly.
    if len(fields) == 1:
        return fields[0], new_offset
    raise UnpackingError('Multiple non-bytes in bytearray')
def uint(buffer: bytes, offset: int = 0) -> Tuple[int, int]:
    """
    Unpack unsigned int from Starbound save file.

    Reads a VLQ: 7 data bits per byte, most significant group first,
    with the high bit marking continuation.

    :param buffer: Starbound save file
    :param offset: position in Starbound save file
    :return: unsigned int, new offset
    """
    result = 0
    byte = buffer[offset]
    offset += 1
    while byte & 128:
        result = (result << 7) | (byte & 127)
        byte = buffer[offset]
        offset += 1
    result = (result << 7) | (byte & 127)
    return result, offset
def int_(buffer: bytes, offset: int = 0) -> Tuple[int, int]:
    """
    Unpack signed int from Starbound save file.

    The wire format is zig-zag encoding on top of the unsigned VLQ: even
    raw values are non-negative (raw / 2), odd raw values are negative
    (-(raw + 1) / 2).

    :param buffer: Starbound save file
    :param offset: position in Starbound save file
    :return: int, new offset
    """
    raw = 0
    while True:
        byte = buffer[offset]
        offset += 1
        raw = (raw << 7) | (byte & 127)
        if byte & 128 == 0:
            break
    if raw % 2:
        return -((raw >> 1) + 1), offset
    return raw >> 1, offset
def str_(buffer: bytes, offset: int = 0) -> Tuple[str, int]:
    """
    Unpack str from Starbound save file: a uint length prefix, then that
    many single-byte characters.

    :param buffer: Starbound save file
    :param offset: position in Starbound save file
    :return: str, new offset
    """
    length, offset = uint(buffer, offset)
    # Build e.g. '12c' to pull `length` individual bytes via struct().
    fmt = '{0:d}c'.format(length)
    return struct(fmt, buffer, offset)
def bool_(buffer: bytes, offset: int = 0) -> Tuple[bool, int]:
    """
    Unpack bool from Starbound save file.

    Any non-zero byte counts as True.

    :param buffer: Starbound save file
    :param offset: position in Starbound save file
    :return: bool, new offset
    """
    flag = buffer[offset] != 0
    return flag, offset + 1
# pylint: disable=unused-argument
# noinspection PyUnusedLocal
def none(buffer: bytes, offset: int = 0) -> Tuple[None, int]:
    """
    Unpack None/unset from Starbound save file.

    Nothing is stored for None, so the offset is returned untouched.

    :param buffer: Starbound save file
    :param offset: position in Starbound save file
    :return: None, new offset
    """
    return None, offset
def float_(buffer: bytes, offset: int = 0) -> Tuple[float, int]:
    """
    Unpack float (big-endian IEEE-754 double) from Starbound save file.

    :param buffer: Starbound save file
    :param offset: position in Starbound save file
    :return: float, new offset
    """
    return struct('>d', buffer, offset)
def type_(buffer: bytes, offset: int = 0) -> Tuple[type, int]:
    """
    Unpack type from Starbound save file.

    Type tags are 1..7: NoneType, float, bool, int, str, list, dict.

    :param buffer: Starbound save file
    :param offset: position in Starbound save file
    :return: type, new offset
    :raises UnpackingError: when format not as expected
    """
    types = [type(None), float, bool, int, str, list, dict]
    index, offset = uint(buffer, offset)
    # Bug fix: the original only rejected index > len(types); a tag of 0
    # fell through to types[-1] and silently decoded as dict.
    if not 1 <= index <= len(types):
        raise UnpackingError(f'Unsupported value type: {index}')
    return types[index - 1], offset
def list_(buffer: bytes, offset: int = 0) -> Tuple[List[SBT], int]:
    """
    Unpack list from Starbound save file: a uint length, then that many
    typed elements.

    :param buffer: Starbound save file
    :param offset: position in Starbound save file
    :return: list, new offset
    """
    length, offset = uint(buffer, offset)
    result = []
    for _ in range(length):
        item, offset = typed(buffer, offset)
        result.append(item)
    return result, offset
def dict_(buffer: bytes, offset: int = 0) -> Tuple[Dict[str, SBT], int]:
    """
    Unpack dict from Starbound save file: a uint length, then that many
    (string key, typed value) pairs.

    :param buffer: Starbound save file
    :param offset: position in Starbound save file
    :return: dict, new offset
    """
    length, offset = uint(buffer, offset)
    result: Dict[str, SBT]
    # Preserve on-disk key order when configured to, so a later repack
    # can be byte-identical.
    if config.ORDERED_DICT:
        result = OrderedDict()
    else:
        result = {}
    for _ in range(length):
        key, offset = str_(buffer, offset)
        item, offset = typed(buffer, offset)
        result[key] = item
    return result, offset
def typed(buffer: bytes, offset: int = 0) -> Tuple[SBT, int]:
    """
    Unpack a typed data structure from the buffer at a given offset.

    Reads a type tag via type_(), then dispatches to that type's unpacker.

    :param buffer: buffer to read
    :param offset: offset in buffer
    :return: unpacked data
    """
    handlers = {
        type(None): none,
        bool: bool_,
        int: int_,
        float: float_,
        list: list_,
        dict: dict_,
        str: str_
    }
    value_type, offset = type_(buffer, offset)
    value, offset = handlers[value_type](buffer, offset)
    return value, offset
def header(buffer: bytes, offset: int = 0) -> Tuple[bytes, str, List[int], int]:
    """
    Unpack a Starbound header structure from the buffer at a given offset.

    Layout: 6-byte save-format magic, entity name string, 5 flag bytes.

    :param buffer: buffer to read
    :param offset: offset in buffer
    :return: Starbound header (save format, entity, flags, new offset)
    """
    save_format = buffer[offset:offset + 6]
    offset += 6
    entity, offset = str_(buffer, offset=offset)
    flags = list(buffer[offset:offset + 5])
    offset += 5
    return save_format, entity, flags, offset
|
{"/starparse/tests/test_ci.py": ["/starparse/__init__.py"], "/starparse/pack.py": ["/starparse/__init__.py"], "/starparse/tests/__init__.py": ["/starparse/__init__.py"], "/starparse/unpack.py": ["/starparse/__init__.py"], "/tests/test_e2e.py": ["/starparse/__init__.py"], "/tests/test_pack_unpack.py": ["/starparse/__init__.py"]}
|
19,065,838
|
buhanec/starparse
|
refs/heads/main
|
/starparse/__init__.py
|
"""Starbound save parser."""
from starparse import pack, unpack
__all__ = ('pack', 'unpack')
__version__ = '0.1.1'
|
{"/starparse/tests/test_ci.py": ["/starparse/__init__.py"], "/starparse/pack.py": ["/starparse/__init__.py"], "/starparse/tests/__init__.py": ["/starparse/__init__.py"], "/starparse/unpack.py": ["/starparse/__init__.py"], "/tests/test_e2e.py": ["/starparse/__init__.py"], "/tests/test_pack_unpack.py": ["/starparse/__init__.py"]}
|
19,065,839
|
buhanec/starparse
|
refs/heads/main
|
/tests/test_pack_unpack.py
|
from collections import OrderedDict
import math
from string import printable
from typing import Any, Dict, List, TYPE_CHECKING
from hypothesis import example, given
from hypothesis.strategies import floats, integers, text
import pytest
from starparse import config, pack, unpack
if TYPE_CHECKING:
import sys
if sys.version_info >= (3, 9):
OrderedDict_ = OrderedDict
elif sys.version_info >= (3, 7, 2):
from typing import OrderedDict as OrderedDict_
else:
OrderedDict_ = Dict
def parity(packer, unpacker, reference, asserter, packed_reference=None):
    """Pack *reference*, optionally compare the packed form against
    *packed_reference*, then unpack and compare the round-tripped value —
    all comparisons go through *asserter*."""
    packed = packer(reference)
    if packed_reference is not None:
        asserter(packed, packed_reference)
    round_tripped, _ = unpacker(packed)
    asserter(round_tripped, reference)
@pytest.mark.parametrize('args, expected', [
    (('4c', b'Alen'), ('Alen', 4)),
    (('4c', b'Alen B'), ('Alen', 4)),
    (('2c', b'Alen B', 2), ('en', 4)),
    (('b', b'\x01\xFF'), (1, 1)),
    (('b', b'\x00\xFF'), (0, 1)),
    (('<i', b'\x00\x88\x00\x00'), (34816, 4))
])
def test_struct(args, expected):
    """unpack.struct decodes char runs, ints and offset slices correctly."""
    assert unpack.struct(*args) == expected
@given(n=integers(min_value=0, max_value=2 ** 256))
def test_uint(n):
    """Unsigned VLQ round-trip consumes the whole buffer and is lossless."""
    packed = pack.uint(n)
    unpacked, offset = unpack.uint(packed)
    assert offset == len(packed)
    assert unpacked == n
@given(n=integers(min_value=-(2 ** 127), max_value=2 ** 127 - 1))
def test_int(n: int):
    """Signed (zig-zag) round-trip is lossless across the 128-bit range."""
    packed = pack.int_(n)
    unpacked, offset = unpack.int_(packed)
    assert offset == len(packed)
    assert unpacked == n
@given(string=text(alphabet=printable))
@example(string=printable)
def test_str(string: str):
    """Printable-ASCII strings round-trip through the length-prefixed codec."""
    packed = pack.str_(string)
    unpacked, offset = unpack.str_(packed)
    assert offset == len(packed)
    assert unpacked == string
@pytest.mark.parametrize('value', [True, False])
def test_bool(value: bool):
    """Both boolean values round-trip through the single-byte codec."""
    packed = pack.bool_(value)
    unpacked, offset = unpack.bool_(packed)
    assert offset == len(packed)
    assert unpacked == value
def test_none():
    """None packs to an empty payload and unpacks without moving the offset."""
    packed = pack.none(None)
    unpacked, offset = unpack.none(packed)
    assert offset == len(packed)
    assert unpacked is None
@pytest.mark.parametrize('n', [-float('nan'), float('nan')])
def test_float_nan(n: float):
    """NaN round-trips through the double codec preserving the sign bit.

    Bug fix: this test was previously defined twice verbatim; the second
    definition shadowed the first, so pytest collected only one copy.
    The duplicate has been removed.
    """
    packed = pack.float_(n)
    unpacked, offset = unpack.float_(packed)
    assert offset == len(packed)
    assert math.isnan(unpacked)
    assert math.copysign(1.0, unpacked) == math.copysign(1.0, n)
@given(n=floats(allow_nan=False))
@example(n=float('inf'))
@example(n=-float('inf'))
def test_float(n: float):
    """Finite and infinite doubles round-trip losslessly."""
    packed = pack.float_(n)
    unpacked, offset = unpack.float_(packed)
    assert offset == len(packed)
    assert unpacked == n
@pytest.mark.parametrize('packed, expected', [
    (b'\x01', type(None)),
    (b'\x02', float),
    (b'\x03', bool),
    (b'\x04', int),
    (b'\x05', str),
    (b'\x06', list),
    (b'\x07', dict),
])
def test_types(packed: bytes, expected: Any):
    """Each one-byte type tag decodes to its corresponding Python type.

    Cleanup: dropped the ``monkeypatch`` fixture parameter, which was
    requested but never used in the body.
    """
    unpacked, offset = unpack.type_(packed)
    assert offset == len(packed)
    assert unpacked == expected
@pytest.mark.parametrize('value', [
    [],
    [1, 2, 3],
    ["a", "b", "c"],
    [1, "b", []],
    [1, "b", [1, "c"]],
])
def test_list(value: List[Any]):
    """Heterogeneous and nested lists round-trip through the typed codec."""
    packed = pack.list_(value)
    unpacked, offset = unpack.list_(packed)
    assert offset == len(packed)
    assert unpacked == value
@pytest.mark.parametrize('value', [
    {},
    {"a": 1, "b": 2},
    {"a": "a", "b": 2},
    {"a": {"b": {}}},
])
def test_dict(value: Dict[Any, Any], monkeypatch):
    """Plain dicts round-trip when ORDERED_DICT mode is off."""
    monkeypatch.setattr(config, 'ORDERED_DICT', False)
    packed = pack.dict_(value)
    unpacked, offset = unpack.dict_(packed)
    assert offset == len(packed)
    assert unpacked == value
@pytest.mark.parametrize('value', [
    OrderedDict({}),
    OrderedDict({"a": 1, "b": 2}),
    OrderedDict({"a": "a", "b": 2}),
    OrderedDict({"a": OrderedDict({"b": OrderedDict({})})}),
])
def test_dict_ordered(value: 'OrderedDict_[Any, Any]', monkeypatch):
    """OrderedDicts round-trip when ORDERED_DICT mode is on."""
    monkeypatch.setattr(config, 'ORDERED_DICT', True)
    packed = pack.dict_(value)
    unpacked, offset = unpack.dict_(packed)
    assert offset == len(packed)
    assert unpacked == value
|
{"/starparse/tests/test_ci.py": ["/starparse/__init__.py"], "/starparse/pack.py": ["/starparse/__init__.py"], "/starparse/tests/__init__.py": ["/starparse/__init__.py"], "/starparse/unpack.py": ["/starparse/__init__.py"], "/tests/test_e2e.py": ["/starparse/__init__.py"], "/tests/test_pack_unpack.py": ["/starparse/__init__.py"]}
|
19,065,840
|
buhanec/starparse
|
refs/heads/main
|
/tests/ci_players/__init__.py
|
"""Player files for use in tests."""
PLAYERS = ['amadan', 'babybirne', 'gluhbirne']
|
{"/starparse/tests/test_ci.py": ["/starparse/__init__.py"], "/starparse/pack.py": ["/starparse/__init__.py"], "/starparse/tests/__init__.py": ["/starparse/__init__.py"], "/starparse/unpack.py": ["/starparse/__init__.py"], "/tests/test_e2e.py": ["/starparse/__init__.py"], "/tests/test_pack_unpack.py": ["/starparse/__init__.py"]}
|
19,071,588
|
jasonchristopherchandra/final_proto
|
refs/heads/master
|
/final_proto/final_proto_app/functions.py
|
from urllib.parse import urlparse, parse_qs
from pprint import pprint
from google_auth_oauthlib.flow import Flow, InstalledAppFlow
from googleapiclient.discovery import build
from google.oauth2.credentials import Credentials
from allauth.socialaccount.models import SocialToken, SocialApp
from django.shortcuts import render,redirect
from django.http import HttpResponseRedirect,HttpResponse
from pytchat import LiveChat, CompatibleProcessor
from chat_downloader import ChatDownloader
import time
import pytchat
import subprocess
import shlex
import requests
from django.http import StreamingHttpResponse
from django.views.decorators.csrf import csrf_exempt
import json
import os
import signal
from django.http import JsonResponse
subprocess_value = None


def extract_video_id(url):
    """Extract the YouTube video id from *url*.

    Handles short ``youtu.be`` links and ``/watch``, ``/embed/`` and ``/v/``
    paths on ``youtube.com`` (desktop, ``www.`` and mobile ``m.`` hosts).
    Returns the id string, or ``None`` when no id can be found (the parsed
    URL is printed for debugging in that case, as before).
    """
    query = urlparse(url)
    if query.hostname == 'youtu.be':
        return query.path[1:]
    # Also accept the mobile host; the original check missed 'm.youtube.com'.
    if query.hostname in {'www.youtube.com', 'youtube.com', 'm.youtube.com'}:
        if query.path == '/watch':
            # .get() avoids the KeyError the original raised on watch URLs
            # that carry no ?v= parameter.
            v = parse_qs(query.query).get('v')
            return v[0] if v else None
        if query.path[:7] == '/embed/':
            return query.path.split('/')[2]
        if query.path[:3] == '/v/':
            return query.path.split('/')[2]
    print(query)
    return None
def check_active_livechat(url, request):
    """Return 'alive' if the video at *url* has an active YouTube live chat.

    Uses the requesting user's stored Google OAuth token to query the
    YouTube Data API. Returns the string 'alive' when the response exposes
    an ``activeLiveChatId`` and 'dead' otherwise.
    """
    video_id = extract_video_id(url)  # renamed from 'id' (shadowed builtin)
    print(video_id)
    token = SocialToken.objects.get(account__user=request.user, account__provider='google')
    print(token)
    # SECURITY NOTE: OAuth client credentials are hard-coded; they should be
    # loaded from settings/environment rather than committed to source.
    credentials = Credentials(
        token=token.token,
        refresh_token=token.token_secret,
        token_uri='https://oauth2.googleapis.com/token',
        client_id='375686044917-4ip2r585igrkf6kesp3ggmfd45f53433.apps.googleusercontent.com',  # replace with yours
        client_secret='HZgBiq-fG0_vrfsxHHNA7Ptu')  # replace with yours
    print("secret token:" + " " + credentials.refresh_token)
    service = build('youtube', 'v3', credentials=credentials)
    print(service)
    # Look up the video's live-streaming details via the Data API.
    response = service.videos().list(
        part='snippet,liveStreamingDetails',
        id=video_id
    ).execute()
    try:
        # A missing key or an empty items list means there is no live chat;
        # the original's bare except and unreachable trailing return were
        # replaced with targeted exception types.
        print(response['items'][0]['liveStreamingDetails']['activeLiveChatId'])
        return 'alive'
    except (KeyError, IndexError):
        return 'dead'
def send_message(request):
    """Post a chat message to the active live chat of a YouTube video.

    Expects a JSON request body with 'url' and 'message' keys. Resolves the
    video's active live-chat id via the YouTube Data API using the
    requesting user's Google OAuth token, inserts the message, and returns
    a JSON "success" response.
    """
    data = json.loads(request.body.decode('UTF-8'))
    url = data['url']
    message = data['message']
    video_id = extract_video_id(url)  # renamed from 'id' (shadowed builtin)
    print(video_id)
    print(message)
    token = SocialToken.objects.get(account__user=request.user, account__provider='google')
    print(token)
    # SECURITY NOTE: hard-coded OAuth client credentials; move to settings.
    credentials = Credentials(
        token=token.token,
        refresh_token=token.token_secret,
        token_uri='https://oauth2.googleapis.com/token',
        client_id='375686044917-4ip2r585igrkf6kesp3ggmfd45f53433.apps.googleusercontent.com',  # replace with yours
        client_secret='HZgBiq-fG0_vrfsxHHNA7Ptu')  # replace with yours
    print("secret token:" + " " + credentials.refresh_token)
    service = build('youtube', 'v3', credentials=credentials)
    print(service)
    # Find the video's active live chat id.
    response = service.videos().list(
        part='snippet,liveStreamingDetails',
        id=video_id
    ).execute()
    livechatid = response['items'][0]['liveStreamingDetails']['activeLiveChatId']
    # Insert the message. (The unused 'channelid' lookup and the unused
    # 'response2' binding from the original were dropped; the API call and
    # its side effect are unchanged.)
    service.liveChatMessages().insert(
        part='snippet',
        body=dict(
            snippet=dict(
                liveChatId=livechatid,
                type="textMessageEvent",
                textMessageDetails=dict(
                    messageText=message
                )
            )
        )
    ).execute()
    return JsonResponse("success", status=200, safe=False)
def view_message(request):
    """Acknowledge a request to start streaming translated chat messages.

    Reads 'url' and 'token' from the JSON request body and logs the
    subprocess command line that would fetch/translate the chat.
    NOTE(review): this view only builds and prints the command string — it
    never launches it (the subprocess is started by the websocket consumer
    in tester4.py). The unreachable trailing ``return JsonResponse(...)``
    from the original was removed.
    """
    data = json.loads(request.body.decode('UTF-8'))
    print(data)
    url = data['url']
    token = data['token']
    print(url)
    print(data)
    function_call = 'python3 tester3.py ' + str(url) + " " + str(token)
    print("this is the function call " + function_call)
    print("is this weird")
    return HttpResponse("subprocess started")
|
{"/final_proto/final_proto_app/views.py": ["/final_proto/final_proto_app/forms.py", "/final_proto/final_proto_app/functions.py"], "/final_proto/final_proto_app/urls.py": ["/final_proto/final_proto_app/functions.py"]}
|
19,071,589
|
jasonchristopherchandra/final_proto
|
refs/heads/master
|
/final_proto/final_proto_app/urls.py
|
from django.urls import path
from . import views
from django.views.generic import TemplateView
from .functions import send_message,view_message
# URL routes for final_proto_app: message send/view endpoints plus the
# URL-entry pages.
urlpatterns = [
    path('send_message/', send_message, name = "call_yt"),
    path('get_url/', views.retrieveURL, name = "url_getter"),
    path('view_message/', view_message, name = "get_translated_messages"),
    path('enter_url/', views.enter_url, name='enter_url')
]
|
{"/final_proto/final_proto_app/views.py": ["/final_proto/final_proto_app/forms.py", "/final_proto/final_proto_app/functions.py"], "/final_proto/final_proto_app/urls.py": ["/final_proto/final_proto_app/functions.py"]}
|
19,071,590
|
jasonchristopherchandra/final_proto
|
refs/heads/master
|
/final_proto/final_proto_app/views.py
|
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .functions import send_message,view_message,check_active_livechat
from django.core.exceptions import PermissionDenied
from django.contrib import messages
import pytchat
def enter_url(request):
    """Render the URL-entry page for authenticated users; 403 otherwise."""
    if not request.user.is_authenticated:
        raise PermissionDenied()
    return render(request, "enter_url.html")
def retrieveURL(request):
    """Handle the URL submission form.

    On POST, validates that a URL was entered, checks whether the video's
    live chat is active, and renders the view/send page with that status.
    Only available to authenticated users (raises PermissionDenied
    otherwise).
    """
    if not request.user.is_authenticated:
        raise PermissionDenied()
    if request.method == "POST":
        title = str(request.POST.get('URL'))
        if title == '':
            messages.warning(request, 'Please enter url !')
            return render(request, 'enter_url.html')
        print(title)
        print(isinstance(title, str))
        livechatstatus = check_active_livechat(title, request)
        # The unused 'videoDetails' dict and dead 'title'/'context'
        # initializers from the original were removed.
        context = {'title': title, 'livechatstatus': livechatstatus}
        return render(request, "viewsend.html", context)
    # NOTE(review): non-POST requests return None, which Django rejects —
    # this matches the original control flow; confirm GET is never routed
    # here, or render the form instead.
    return None
|
{"/final_proto/final_proto_app/views.py": ["/final_proto/final_proto_app/forms.py", "/final_proto/final_proto_app/functions.py"], "/final_proto/final_proto_app/urls.py": ["/final_proto/final_proto_app/functions.py"]}
|
19,071,591
|
jasonchristopherchandra/final_proto
|
refs/heads/master
|
/final_proto/final_proto_app/apps.py
|
from django.apps import AppConfig
# Django application configuration for the final_proto_app package.
class FinalProtoAppConfig(AppConfig):
    # App label Django uses to register this application.
    name = 'final_proto_app'
|
{"/final_proto/final_proto_app/views.py": ["/final_proto/final_proto_app/forms.py", "/final_proto/final_proto_app/functions.py"], "/final_proto/final_proto_app/urls.py": ["/final_proto/final_proto_app/functions.py"]}
|
19,071,592
|
jasonchristopherchandra/final_proto
|
refs/heads/master
|
/final_proto/tester3.py
|
from chat_downloader import ChatDownloader
import sys
import json
import re
import emoji
import requests
import os
import time
from firebase_admin import messaging,credentials
import firebase_admin
# Standalone worker: streams live-chat messages for the video URL given in
# argv[1], sends each through the local translation API, and pushes the
# translated result to the Firebase device token given in argv[2].
cred = credentials.Certificate("chattranslator-2c03a-firebase-adminsdk-8554s-175f86d014.json")
firebase_admin.initialize_app(cred)
print(str(sys.argv[1]))
try:
    chat = ChatDownloader().get_chat(str(sys.argv[1]))  # live chat message stream for the video
    # Iterate over incoming chat messages.
    for message in chat:
        chatlist = { }
        # Strip non-ASCII characters (emoji etc.) before translation.
        chatlist['author'] = message['author']['name'].encode("ascii", errors="ignore").decode()
        chatlist['message'] = message['message'].encode("ascii", errors="ignore").decode()
        url = 'http://localhost:5000/translate_view'  # local AI translation endpoint
        data = {
            "author": chatlist['author'],
            "message": chatlist['message'],
        }
        response = requests.post(url, json=data)
        registration_token = str(sys.argv[2])
        # See documentation on defining a message payload.
        # NOTE(review): 'message' here rebinds the loop variable; harmless
        # because the chat message was already copied into chatlist above.
        message = messaging.Message(
            data={
                'author': response.json()['author'],
                'message': response.json()['message'],
                'translated_message': response.json()['translated_message']
            },
            token=registration_token,
        )
        # Send a message to the device corresponding to the provided
        # registration token. The sleeps throttle the push rate.
        time.sleep(1.5)
        response = messaging.send(message)
        # Response is a message ID string.
        time.sleep(0.1)
except Exception as inst:
    # Top-level boundary: report the failure to the device as a SYSTEM
    # message so the client UI shows why the stream stopped.
    print("An exception occurred")
    print(type(inst))    # the exception instance
    print(inst.args)     # arguments stored in .args
    print(inst)
    registration_token = str(sys.argv[2])
    message = messaging.Message(
        data={
            'author': "SYSTEM",
            'message': "VIDEO IS NOT A LIVE STREAM",
            'translated_message': "VIDEO IS NOT A LIVE STREAM"
        },
        token=registration_token,
    )
    response = messaging.send(message)
|
{"/final_proto/final_proto_app/views.py": ["/final_proto/final_proto_app/forms.py", "/final_proto/final_proto_app/functions.py"], "/final_proto/final_proto_app/urls.py": ["/final_proto/final_proto_app/functions.py"]}
|
19,071,593
|
jasonchristopherchandra/final_proto
|
refs/heads/master
|
/api/__main__.py
|
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import sys
import re
import math
from io import open
import numpy as np
import matplotlib.pyplot as plt
import pickle
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import torch.cuda
from production_model import evaluate,Lang,EncoderRNN,DecoderAttn
# this line clears sys to allow for argparse to work as gradient clipper
import sys; sys.argv=['']; del sys

# Flask app serving the id<->en neural translation models.
app = Flask(__name__)
CORS(app, support_credentials=True)

# Model hyperparameters — must match the checkpoints being loaded below.
bidirectional = True
if bidirectional:
    directions = 2
else:
    directions = 1
# number of layers in both the Encoder and Decoder
layers = 2
# Hidden size of the Encoder and Decoder
hidden_size = 600
# Dropout value for Encoder and Decoder
dropout = 0.6

# LOAD CONFIGURATIONS
# Set the common name of the loading files
# NOTE(review): 'common_file_name' is never used below — likely leftover.
common_file_name = "testdata.tatoeba_identic_trim.20_vocab.25000_directions.2_layers.4_hidden.100_dropout.0.5_learningrate.1_batch.10_epochs.100"
id_lang = 'id'
en_lang = 'en'
dataset = 'tatoeba_identic'
directory = ''
# Set the name of the loading files
id_vocab_file = directory + id_lang + '_21484_' + dataset + '_vocab.p'
en_vocab_file = directory + en_lang + '_21731_' + dataset + '_vocab.p'
id_en_enc_file = '%s_%s_enc_direction_%s_layer_%s_hidden_%s_dropout_%s.pth' % (id_lang, en_lang, directions, layers, hidden_size, dropout)
id_en_dec_file = '%s_%s_dec_direction_%s_layer_%s_hidden_%s_dropout_%s.pth' % (id_lang, en_lang, directions, layers, hidden_size, dropout)
en_id_enc_file = '%s_%s_enc_direction_%s_layer_%s_hidden_%s_dropout_%s.pth' % (en_lang, id_lang, directions, layers, hidden_size, dropout)
en_id_dec_file = '%s_%s_dec_direction_%s_layer_%s_hidden_%s_dropout_%s.pth' % (en_lang, id_lang, directions, layers, hidden_size, dropout)

# Mandatory variables initialization
device = torch.device('cpu')
# use_cuda = torch.cuda.is_available()
id_vocab = None
en_vocab = None
id_en_encoder = None
id_en_decoder = None
en_id_encoder = None
en_id_decoder = None

# LOAD EVERYTHING
# SECURITY NOTE: pickle.load executes arbitrary code from the vocab files —
# only load checkpoints/vocabularies from trusted sources.
id_vocab = pickle.load(open(id_vocab_file,'rb'))
en_vocab = pickle.load(open(en_vocab_file,'rb'))
id_en_encoder = EncoderRNN(id_vocab.vocab_size, hidden_size, layers=layers,
                           dropout=dropout, bidirectional=bidirectional)
id_en_decoder = DecoderAttn(hidden_size, en_vocab.vocab_size, layers=layers,
                            dropout=dropout, bidirectional=bidirectional)
id_en_encoder.load_state_dict(torch.load(id_en_enc_file, map_location=device))
id_en_decoder.load_state_dict(torch.load(id_en_dec_file, map_location=device))
en_id_encoder = EncoderRNN(en_vocab.vocab_size, hidden_size, layers=layers,
                           dropout=dropout, bidirectional=bidirectional)
en_id_decoder = DecoderAttn(hidden_size, id_vocab.vocab_size, layers=layers,
                            dropout=dropout, bidirectional=bidirectional)
en_id_encoder.load_state_dict(torch.load(en_id_enc_file, map_location=device))
en_id_decoder.load_state_dict(torch.load(en_id_dec_file, map_location=device))
# Inference mode: disable dropout for serving.
id_en_encoder.eval()
id_en_decoder.eval()
en_id_encoder.eval()
en_id_decoder.eval()
# app.config["DEBUG"] = True


def translate(str):
    # NOTE(review): stub — ignores its argument and shadows the builtin
    # 'str' with its parameter name; appears unused by the routes below.
    str = "sudah di edit"
    return str
@app.route('/', methods=['GET'])
def home():
    """Landing page: confirms the translation API is running."""
    # Fixed mismatched markup — the heading was opened with <h1> but closed
    # with </p> in the original.
    return "<h1>Proto Translation API for chat translator.</h1>"
# GET requests will be blocked
@app.route('/translate_view', methods=['POST'])
def translate_view():
    """Translate an incoming chat message (en -> id, judging by the models
    used — confirm direction) and echo author/original alongside it.

    Expects JSON {'author': ..., 'message': ...}; missing keys yield nulls
    in the response.
    """
    request_data = request.get_json()
    # print(request_data, file=sys.stderr)
    author = None
    message = None
    translated_message = None
    if request_data:
        if 'author' in request_data:
            author = request_data['author']
        if 'message' in request_data:
            message = request_data['message']
            # cutoff_length caps the decoded sequence length.
            translated_message = evaluate(en_id_encoder, en_id_decoder, en_vocab, id_vocab,request_data['message'], cutoff_length=20)
    return jsonify({'author':author,'message':message,'translated_message':translated_message})
@app.route('/translate_send', methods=['POST'])
@cross_origin(supports_credentials=True)
def translate_send():
    """Translate an outgoing message (id -> en models) for sending.

    Expects JSON {'message': ...}; returns {'message': null} when the key
    is absent.
    """
    request_data = request.get_json()
    print(request_data)
    message = None
    if request_data:
        if 'message' in request_data:
            message = evaluate(id_en_encoder, id_en_decoder, id_vocab, en_vocab,request_data['message'], cutoff_length=20)
    return jsonify({'message':message})
app.run(host='0.0.0.0', port=5000, debug=True)
|
{"/final_proto/final_proto_app/views.py": ["/final_proto/final_proto_app/forms.py", "/final_proto/final_proto_app/functions.py"], "/final_proto/final_proto_app/urls.py": ["/final_proto/final_proto_app/functions.py"]}
|
19,071,594
|
jasonchristopherchandra/final_proto
|
refs/heads/master
|
/final_proto/routing.py
|
from django.urls import re_path
import tester4
# Websocket routes: the chat consumer that launches/stops the translated
# live-chat subprocess.
websocket_urlpatterns = [
    re_path(r'start_chat/', tester4.ChatConsumer.as_asgi()),
]
|
{"/final_proto/final_proto_app/views.py": ["/final_proto/final_proto_app/forms.py", "/final_proto/final_proto_app/functions.py"], "/final_proto/final_proto_app/urls.py": ["/final_proto/final_proto_app/functions.py"]}
|
19,071,595
|
jasonchristopherchandra/final_proto
|
refs/heads/master
|
/final_proto/tester4.py
|
from chat_downloader import ChatDownloader
import sys
import json
import re
import emoji
import requests
import os
import time
from firebase_admin import messaging,credentials
import firebase_admin
import asyncio
from channels.consumer import AsyncConsumer
import subprocess
import shlex
print('reached')
URL = None
token = None


# Channels consumer that, on each websocket message, launches tester3.py as
# a subprocess to stream translated chat for the given URL/device token,
# and terminates it when the socket closes.
class ChatConsumer(AsyncConsumer):
    async def websocket_connect(self, event, URL=None, token=None):
        # Accept the incoming websocket connection.
        print("connected", event)
        print
        # NOTE(review): the bare 'print' above is a no-op expression —
        # probably a leftover from debugging.
        print(URL)
        print(token)
        await self.send({
            "type": "websocket.accept"
        })
        print(str(sys.argv[0]))
    async def websocket_receive(self, event):
        # Payload is JSON text with 'url' and 'token' keys.
        print("receive", event)
        is_connected=True
        data = json.loads(event['text'])
        print(data)
        url = data['url']
        print(url)
        token = data['token']
        print(token)
        # NOTE(review): 'substart' is a module-level global — concurrent
        # connections would clobber each other's subprocess handle; an
        # instance attribute would be safer.
        global substart
        substart = subprocess.Popen(shlex.split('python3 tester3.py ' + str(url) +" "+ str(token)))
    async def websocket_disconnect(self, event):
        # NOTE(review): raises NameError if the socket closes before any
        # message was received (substart never assigned) — confirm intent.
        print("disconnected", event)
        substart.terminate()
|
{"/final_proto/final_proto_app/views.py": ["/final_proto/final_proto_app/forms.py", "/final_proto/final_proto_app/functions.py"], "/final_proto/final_proto_app/urls.py": ["/final_proto/final_proto_app/functions.py"]}
|
19,106,463
|
igudesman/flashscore
|
refs/heads/master
|
/signal.py
|
import requests
from settings import TOKEN, chatID, message_form
from datetime import datetime
def telegram_bot_sendtext(match_info, DEBUG=False):
    """Send a formatted match alert to every configured Telegram chat.

    With DEBUG=True, sends the current timestamp (notifications disabled)
    instead of a match alert. Returns the JSON body of the last Telegram
    API response.

    NOTE(review): this module is named signal.py and shadows the stdlib
    'signal' module for anything importing it from this directory.
    """
    bot_token = TOKEN
    bot_chatID = chatID
    api_url = 'https://api.telegram.org/bot' + bot_token + '/sendMessage'
    if DEBUG:
        bot_message = str(datetime.now())
        # Fixed: the original concatenated bot_chatID (a list) into the URL
        # string, which raised TypeError; iterate the chat ids instead.
        # Passing the text via params also lets requests URL-encode it.
        for chat in bot_chatID:
            response = requests.get(api_url, params={
                'chat_id': chat,
                'parse_mode': 'Markdown',
                'text': bot_message,
                'disable_notification': 'True',
            })
        return response.json()
    bot_message = message_form.format(league=match_info['league']['type'] + ': ' + match_info['league']['name'],
                                      team_1=match_info['event_participant_home'],
                                      team_2=match_info['event_participant_away'],
                                      time=match_info['event_stage'],
                                      url=match_info['event_url'])
    for chat in bot_chatID:
        # params= URL-encodes the message; the original's raw string
        # concatenation broke on spaces/newlines in message_form.
        response = requests.get(api_url, params={
            'chat_id': chat,
            'parse_mode': 'Markdown',
            'text': bot_message,
        })
    # As in the original, only the last chat's response is returned.
    return response.json()
|
{"/signal.py": ["/settings.py"], "/desktop-scraping.py": ["/settings.py", "/signal.py"]}
|
19,106,464
|
igudesman/flashscore
|
refs/heads/master
|
/desktop-scraping.py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from settings import leagues, URL, COEF
from signal import telegram_bot_sendtext
import os
class Bot():
    """Selenium scraper that watches live basketball matches on flashscore
    and sends a Telegram alert when the betting indicator fires.

    NOTE(review): relies on absolute XPaths and the deprecated
    find_element_by_* Selenium API — fragile against site/library changes.
    """
    def __init__(self, url):
        # Requires chromedriver.exe next to the script (Windows layout).
        self.driver = webdriver.Chrome('chromedriver.exe')
        # Match ids already alerted, so each match alerts at most once.
        self.already_alerted_ids = []
        self.driver.implicitly_wait(10)
        self.url = url
        self.move_to_live_section()
    # move to the live section
    def move_to_live_section(self):
        # Open the start page and click the tab labelled LIVE.
        self.driver.get(self.url)
        tabs = self.driver.find_elements_by_class_name('tabs__tab')
        for tab in tabs:
            if tab.text == 'LIVE':
                tab.click()
                print('Moved to LIVE section.')
                break
    def event_info(self, event_object):
        """Scrape stage, home/away participants and the stats URL for one
        event row; returns them as a dict keyed by field name."""
        rows = event_object.find_elements_by_tag_name('div')
        event_stage = ''
        event_participant_home = ''
        event_participant_away = ''
        for row in rows:
            if row.get_attribute('class') == 'event__stage':
                event_stage = row.text
            if row.get_attribute('class') == 'event__participant event__participant--home':
                event_participant_home = row.text
            if row.get_attribute('class') == 'event__participant event__participant--away':
                event_participant_away = row.text
        # The element id ends with the flashscore match id after the last '_'.
        id = event_object.get_attribute('id')
        id = id.split('_')[-1]
        event_url = 'https://www.flashscore.ru/match/{id}/#match-statistics;'.format(id=id)
        return {'id': id,
                'event_stage': event_stage,
                'event_participant_home': event_participant_home,
                'event_participant_away': event_participant_away,
                'event_url': event_url}
    def is_correct_league(self, league_object):
        """Return (True, {'type', 'name'}) when the header row matches the
        whitelist in settings.leagues, else (False, None)."""
        league = league_object.find_element_by_class_name('event__titleBox')
        type = league.find_element_by_class_name('event__title--type').text
        name = league.find_element_by_class_name('event__title--name').text
        if type in leagues.keys():
            if name in leagues[type]:
                return True, {'type': type,
                              'name': name}
        return False, None
    def checking_loop(self, timeout):
        """Main polling loop: walk the live-event list, evaluate whitelisted
        matches and alert once per match; sleep *timeout* between passes.

        NOTE(review): on a page-access failure it recurses into itself and,
        when that call eventually returns, falls through and keeps running
        the current iteration — the recursion also grows the stack without
        bound on repeated failures. 'games_section' is assigned but unused.
        """
        while True:
            # games_section = self.driver.find_element_by_class_name('sportName basketball')
            try:
                games_section = self.driver.find_element_by_xpath('/html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div[2]/div/div')
            except:
                print('Access is denied. Trying again..')
                self.move_to_live_section()
                self.checking_loop(timeout)
            correct_league = False
            league_info = None
            # Rows are addressed by 1-based child index until lookup fails.
            block_id = 1
            while True:
                try:
                    event = self.driver.find_element_by_xpath('/html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div[2]/div/div/div[{}]'.format(block_id))
                except:
                    break
                try:
                    class_name = event.get_attribute('class')
                except:
                    break
                # A header row switches the "current league" context for the
                # event rows that follow it.
                if 'event__header' in class_name:
                    correct_league, league_info = self.is_correct_league(event)
                    # print(league_info)
                    block_id += 1
                    continue
                if correct_league:
                    match_info = self.event_info(event)
                    bet = self.calculate_indicator(match_info)
                    match_info['league'] = league_info
                    # telegram_bot_sendtext(match_info, True)
                    if bet:
                        # Alert each match at most once.
                        if match_info['id'] not in self.already_alerted_ids:
                            self.already_alerted_ids.append(match_info['id'])
                            telegram_bot_sendtext(match_info, False)
                block_id += 1
            print('Iteration has gone.')
            sleep(timeout)
    def get_match_stats(self, content, quater):
        """Open the statistics tab for *quater* and return the rows whose
        title appears in *content* as {title: [home_value, away_value]}.

        Returns {} when the tab cannot be opened or a row read fails.
        NOTE(review): 'stat_rows' below is assigned but never used.
        """
        block_id = 1
        stat_rows = '/html/body/div[1]/div[1]/div[4]/div[12]/div[2]/div[4]/div[{q}]/div[{block_id}]'
        result = {}
        try:
            self.driver.find_element_by_xpath('//*[@id="statistics-{q}-statistic"]'.format(q=quater)).click()
        except:
            return result
        while True:
            try:
                stats = self.driver.find_element_by_xpath('//*[@id="tab-statistics-{q}-statistic"]/div[{block_id}]'.format(q=quater, block_id=block_id))
            except:
                break
            try:
                stats_class_name = stats.get_attribute('class')
            except:
                block_id += 1
                continue
            if (stats_class_name != 'statRow'):
                block_id += 1
                continue
            try:
                # Each statRow holds three cells: home value, title, away value.
                text_group = '//*[@id="tab-statistics-{q}-statistic"]/div[{block_id}]/div[1]/div[{text}]'
                print
                value_home = self.driver.find_element_by_xpath(text_group.format(q=quater, block_id=block_id, text=1)).text
                title = self.driver.find_element_by_xpath(text_group.format(q=quater, block_id=block_id, text=2)).text
                value_away = self.driver.find_element_by_xpath(text_group.format(q=quater, block_id=block_id, text=3)).text
                if title in content:
                    result[title] = [value_home, value_away]
            except:
                return {}
            block_id += 1
        return result
    def calculate_indicator(self, data):
        """Decide whether the match warrants an alert.

        Only matches currently at a break ('Перерыв') are considered; opens
        the match stats in a second tab, checks the field-goal percentage
        per quarter and the betting odds, then closes the tab. Returns
        True to alert, False otherwise.

        NOTE(review): if no odds rows are found the code prints a warning
        but still indexes coefs[0]/coefs[1] below — potential IndexError.
        'minute' is assigned but never used.
        """
        minute = 0
        quater = 0
        if ('Перерыв' in data['event_stage']):
            try:
                self.driver.find_element_by_xpath('//*[@id="g_3_{id}"]/div[11]'.format(id=data['id']))
            except:
                print('Break: {0} - {1}'.format(data['event_participant_home'], data['event_participant_away']))
                return False
        # elif ('Завершен' not in data['event_stage']) and ('Перенесен' not in data['event_stage']) and (data['event_stage'] != ''):
        #     try:
        #         quater = int(data['event_stage'][0])
        #         minute = int(data['event_stage'].splitlines()[1])
        #     except:
        #         print('Something went wrong with event_stage: ', data['event_stage'])
        #         return False
        else:
            return False
        print('{0} - {1}'.format(data['event_participant_home'], data['event_participant_away']))
        # if (quater != 3):
        #     print('Not 3-rd quarter.')
        #     return False
        # Switch to the new window and open URL B
        self.driver.execute_script("window.open('');")
        self.driver.switch_to.window(self.driver.window_handles[1])
        self.driver.get(data['event_url'] + str(quater))
        #sleep(10)
        # getting quater's stats
        BET = ''
        for i in range(1, 4):
            rows = ['Бросков с игры %']
            stats = self.get_match_stats(rows, i)
            print('{i} quarter: {stats}'.format(i=i, stats=stats))
            if len(rows) != len(stats):
                self.driver.close()
                self.driver.switch_to.window(self.driver.window_handles[0])
                return False
            # Field-goal percentage for home/away, e.g. '56%' -> 56.0.
            coef_home = float(stats[rows[0]][0].split('%')[0])
            coef_away = float(stats[rows[0]][1].split('%')[0])
            if (coef_home == 100.0) or (coef_away == 100.0):
                self.driver.close()
                self.driver.switch_to.window(self.driver.window_handles[0])
                return False
            # Only the 3rd quarter decides the bet side.
            if i == 3:
                if (coef_home >= 70.0) and (coef_away >= 70.0):
                    self.driver.close()
                    self.driver.switch_to.window(self.driver.window_handles[0])
                    return False
                elif coef_home >= 70:
                    BET = 'HOME'
                elif coef_away >= 70:
                    BET = 'AWAY'
                else:
                    self.driver.close()
                    self.driver.switch_to.window(self.driver.window_handles[0])
                    return False
        print(BET)
        # getting match odds
        odds = self.driver.find_elements_by_tag_name('tr')
        coefs = []
        for odd in odds:
            try:
                if odd.get_attribute('class') == 'odd':
                    spans = odd.find_elements_by_tag_name('span')
                    for span in spans:
                        try:
                            if 'odds-wrap' in span.get_attribute('class'):
                                coefs.append(float(span.text))
                        except:
                            continue
            except:
                continue
        print('COEFS: ', coefs)
        if len(coefs) != 2:
            print('Did not find coefs.')
        # Require the odds on the chosen side to be at least COEF.
        if BET == 'HOME':
            if coefs[0] < COEF:
                self.driver.close()
                self.driver.switch_to.window(self.driver.window_handles[0])
                return False
        elif coefs[1] < COEF:
            self.driver.close()
            self.driver.switch_to.window(self.driver.window_handles[0])
            return False
        self.driver.close()
        self.driver.switch_to.window(self.driver.window_handles[0])
        return True
def testing(bot):
    """Manual smoke test: run the indicator on a fixed match snapshot."""
    data = {
        'event_stage': '3 \n 10',
        'event_url': 'https://www.flashscore.ru/match/f5jCDOpC/#match-statistics;'
    }
    print(bot.calculate_indicator(data))
if __name__ == '__main__':
    # Start the scraper with a 10-second pause between polling passes.
    bot = Bot(URL)
    bot.checking_loop(10)
    # testing(bot)
|
{"/signal.py": ["/settings.py"], "/desktop-scraping.py": ["/settings.py", "/signal.py"]}
|
19,106,465
|
igudesman/flashscore
|
refs/heads/master
|
/settings.py
|
# Whitelist of basketball competitions to monitor, keyed by the
# country/region label as it appears on flashscore.ru; values are the
# accepted league names under that label (empty list = none accepted).
leagues = {
    'БОСНИЯ И ГЕРЦЕГОВИНА': ['Чемпионат'],
    'ГЕРМАНИЯ': ['ББЛ'],
    'ГРЕЦИЯ': ['Лига'],
    'ИСПАНИЯ': ['АКБ'],
    'ИТАЛИЯ': ['Лига А', 'А2 - Зеленая группа', 'А2 - Красная группа'],
    'ТУРЦИЯ': ['Суперлига', 'ТБЛ'],
    'ФРАНЦИЯ': ['ЛНБ', 'Про Б'],
    'АВСТРИЯ': ['Суперлига'],
    'АРГЕНТИНА': ['Лига А'],
    'БЕЛЬГИЯ': ['Лига ЕвроМиллионс'],
    'БОЛГАРИЯ': ['НБЛ'],
    'ВЕНГРИЯ': ['НБ I А'],
    'ГРУЗИЯ': ['Суперлига'],
    'ЕВРОПА': ['НЛБ', 'Латвийско-эстонская баскетбольная лига', 'Евролига', 'Еврокубок', 'Лига Чемпионов'],
    'КАТАР': ['КБЛ'],
    'КИПР': ['Дивизион А'],
    'КОСОВО': ['Суперлига'],
    'ЛИТВА': ['ЛКЛ', 'НКЛ'],
    'ПОЛЬША': ['Экстралига', 'Первая лига'],
    'ПОРТУГАЛИЯ': ['ЛПБ'],
    'СЕВЕРНАЯ МАКЕДОНИЯ': ['Суперлига'],
    'СЕРБИЯ': ['Первая лига'],
    'СЛОВАКИЯ': ['Высшая лига'],
    'СЛОВЕНИЯ': ['Лига Нова КБМ'],
    'ФИНЛЯНДИЯ': ['I Divisioona A', 'Высшая Лига'],
    'ХОРВАТИЯ': ['Премьер-лига', 'Prva Liga'],
    'ЧЕРНОГОРИЯ': ['Первая лига'],
    'ЧЕХИЯ': ['НБЛ'],
    'ШВЕЙЦАРИЯ': ['SB League'],
    'ШВЕЦИЯ': ['Лига'],
    'РОССИЯ': ['Единая лига ВТБ'],
    'ВЕЛИКОБРИТАНИЯ': ['ББЛ'],
    'РУМЫНИЯ': ['Дивизион А'],
    'ЧИЛИ': [],
    'НИДЕРЛАНДЫ': [],
    'ИСЛАНДИЯ': []
}
# Start page the scraper opens before switching to the LIVE tab.
URL = 'https://www.flashscore.ru/basketball/'
# Odds threshold used by Bot.calculate_indicator (odds below this reject the bet).
COEF = 1.15
# SECURITY NOTE(review): a live Telegram bot token and chat ids are
# committed here — this token should be revoked and moved to environment
# configuration.
TOKEN = '1416209037:AAH0PxZAW16ctU9JaNqEYfCSq3lAoNIx6Rg'
chatID = ['46506228', '1173821533']
# Alert template filled in by signal.telegram_bot_sendtext.
message_form = """Приготовиться !!!
Лига: {league}
Играют: {team_1} - {team_2}
Время в матче: {time}
Ссылка на матч: {url}
Исход события напишет администратор.
Памятка :
Ставим ВСЕГДА 10% ОТ БАНКА
-тотол БОЛЬШЕ ставим на ОБЩЕМ МАТЧЕ
-тотол МЕНЬШЕ ставим на 4-ой четверти ."""
|
{"/signal.py": ["/settings.py"], "/desktop-scraping.py": ["/settings.py", "/signal.py"]}
|
19,174,060
|
Msalup/BlueTurma2B-mod1
|
refs/heads/main
|
/#gjhgjgjgj.py
|
"""L =[8, 5, 15]
for e in L:
print(e)"""
"""L=[8, 9, 15]
x=0
while x<len(L):
e=L[x]
print(e)
x+=1
s = "tigre"
print("X" + s.center(10) + "X")
x tigre x
print("X"+s.center(10, ".")+"X")"""
"""def soma(a,b):
print(a+b)
soma(2,9)
soma(7,8)
soma(10,15)
def soma(a,b):
return(a+b)
print(soma(2,9))"""
"""def épar(x):
return(x%2==0)
print(épar(2))
print(épar(3))
print(épar(10))"""
# Practice exercise on assignment and printing.
a = 5
print(a)
b = 2
a = b - b  # NOTE(review): b - b is always 0 — possibly another expression was intended
print(a)
a = b - b
print('a')  # NOTE(review): prints the literal letter 'a', not the variable a
|
{"/Aula07.1.py": ["/Aula07.py"]}
|
19,174,061
|
Msalup/BlueTurma2B-mod1
|
refs/heads/main
|
/Resolução de exercicio.py
|
"""list = [5, 7, 2, 9, 4, 1, 3]
print(len(list))
maior_valor = max(list)
print(maior_valor)
menor_valor = min(list)
print(menor_valor )
soma_valor = sum(list)
print(soma_valor)
list.sort()
print(list)
list.reverse()
print(list)
"""
"""
def conta_vogal(frase):
vogais = 0
for letra in frase:
if letra.upper() in "AEIOU":
vogais += 1
return vogais
frase_str = input("Digite uma palavra ou frase para contarmos as vogais: ")
print("A palavra/frase",frase_str,"possui",conta_vogal(frase_str),"vogais")"""
def some_vogal(frase):
    """Return *frase* with every vowel (plain or accented, any case) removed."""
    vogais = "aeiouAEIOUÁÀÃÂáàâãÉÈÊêéèÓÒÔÕóòõôÚÙÛúùûüÍÌÎíìî"
    # Keep only the characters that are not vowels.
    return "".join(letra for letra in frase if letra not in vogais)
# Interactive driver: read a phrase and show it upper-cased without vowels.
str_frase = input("Digite uma palavra ou frase para retirarmos as vogais: ")
#print(str_frase)
print("A palavra ou frase que você digitou: '",str_frase.upper(), "'retiradas as vogais ficam segintes :", some_vogal(str_frase).upper())
|
{"/Aula07.1.py": ["/Aula07.py"]}
|
19,174,062
|
Msalup/BlueTurma2B-mod1
|
refs/heads/main
|
/TESTES-1.py
|
"""soma(a,b):
soma(2, 9)
soma(7, 8)
print(a+b)"""
"""lista1 = [4, 6, 4]
lista2 = ["a", "b", "v"]
lista1[0]"""
# List aliasing/copying practice.
z = [15, 8, 9]
l = [6, 7, 8]
print(z [0])
z[0] = 7
"""print(z[0])
print(z)"""
# l[:] makes a shallow copy, so the insert below does not affect z.
z = l[:]
l.insert(0, 3)
print(l)
|
{"/Aula07.1.py": ["/Aula07.py"]}
|
19,174,063
|
Msalup/BlueTurma2B-mod1
|
refs/heads/main
|
/exemplos aula 8 for.py
|
# Linear search demo: report whether the typed number is in the list.
L = [7,9,10,12]
p = int(input("Digite um número a pesquisar"))
for e in L:
    if e == p:
        print("Elemento encontrado!")
        break
else:
    # for/else: this branch runs only when the loop finished without break.
    # Fixed typo in the user-facing message: "encontrato" -> "encontrado".
    print("Elemento não encontrado")
input()
|
{"/Aula07.1.py": ["/Aula07.py"]}
|
19,174,064
|
Msalup/BlueTurma2B-mod1
|
refs/heads/main
|
/exercicio1.py
|
def IMC(peso, altura):
    """Compute and print the body-mass index for weight *peso* (kg) and
    height *altura* (m)."""
    imc = peso / (altura ** 2)
    print("O IMC é = {0:.2f}".format(imc))


# Read raw strings and convert inside the try block so that invalid input
# reaches the except handler. (The original did not parse at all: both
# input() calls had unbalanced parentheses and the first conversion used a
# stray '´' instead of '='; it also converted at input time, which would
# have crashed before the validation below ever ran.)
peso = input("Digite o peso do indivíduo[DIGITE UM VALOR NUMÉRICO] ")
# Fixed prompt: this line reads the height, but the original asked for the
# weight ("peso") a second time.
altura = input("Digite a altura do indivíduo[DIGITE UM VALOR NUMÉRICO] ")
try:
    peso = float(peso)
    altura = float(altura)
    IMC(peso, altura)
except ValueError:
    print("VOCÊ DIGITOU DADOS INVÁLIDOS")
|
{"/Aula07.1.py": ["/Aula07.py"]}
|
19,199,321
|
wangechimk/pitch-app
|
refs/heads/main
|
/app/auth/views.py
|
from flask import render_template, url_for, redirect, request,flash
from . import auth
from flask_login import login_user, login_required, logout_user
from .forms import RegForm, LoginForm
from ..models import User
from .. import db
from ..email import mail_message
@auth.route('/login', methods=['POST', 'GET'])
def login():
    """Authenticate a user; on success redirect to ?next= or the index."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, remember=form.remember.data)
            return redirect(request.args.get('next') or url_for('main.index'))
        # Deliberately vague message: don't reveal which field was wrong.
        flash('Invalid username or Password')
    # NOTE(review): a fresh LoginForm() is passed to the template, so the
    # user's typed values are discarded on a failed attempt — confirm intent
    # (passing `form` would preserve them).
    return render_template('auth/login.html', loginform=LoginForm())
@auth.route('/logout')
@login_required
def logout():
    """End the current session and return to the landing page."""
    logout_user()
    return redirect(url_for("main.index"))
@auth.route('/signup', methods=['POST', 'GET'])
def signup():
    """Register a new account, send a welcome email and go to login."""
    form = RegForm()
    if form.validate_on_submit():
        # NOTE(review): password is passed straight to the User constructor —
        # presumably hashed by a model-side setter; confirm in models.py.
        user = User(email=form.email.data, username=form.username.data, password=form.password.data)
        user.save_u()
        mail_message("Welcome to Pitch-App", "email/welcome_user.html", user.email, user=user)
        return redirect(url_for('auth.login'))
    return render_template('auth/signup.html', r_form=form)
|
{"/app/auth/views.py": ["/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"]}
|
19,199,322
|
wangechimk/pitch-app
|
refs/heads/main
|
/migrations/versions/0be36f0d6445_user_profile_migration.py
|
"""user profile migration
Revision ID: 0be36f0d6445
Revises: 70b01ed86089
Create Date: 2021-04-26 23:29:50.066697
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# revision identifiers, used by Alembic.
revision = '0be36f0d6445'
down_revision = '70b01ed86089'
branch_labels = None
depends_on = None


def upgrade():
    """Add the user-profile columns (bio, profile_pic) to users."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('bio', sa.String(length=255), nullable=True))
    op.add_column('users', sa.Column('profile_pic', sa.String(), nullable=True))
    # ### end Alembic commands ###


def downgrade():
    """Drop the user-profile columns added in upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'profile_pic')
    op.drop_column('users', 'bio')
    # ### end Alembic commands ###
|
{"/app/auth/views.py": ["/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"]}
|
19,199,323
|
wangechimk/pitch-app
|
refs/heads/main
|
/app/__init__.py
|
from flask import Flask
from config import config_options
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_uploads import UploadSet, configure_uploads, IMAGES
from flask_mail import Mail
# Application object and extension instances; extensions are bound to the
# app inside create_app().
app = Flask(__name__)
app.static_folder = 'static'
db = SQLAlchemy()
bootstrap = Bootstrap()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
# Fixed: Flask-Login's redirect target attribute is `login_view` — the
# original assigned a nonexistent `load_view`, so @login_required endpoints
# returned 401 instead of redirecting anonymous users to the login page.
login_manager.login_view = 'auth.login'
photos = UploadSet('photos', IMAGES)
mail = Mail()
def create_app(config_name):
    """Application factory: configure the module-level `app`, register the
    auth and main blueprints, initialise extensions and return the app.

    NOTE(review): this mutates the global `app` created at import time
    rather than building a fresh Flask instance per call — calling it twice
    with different configs reconfigures the same object.
    """
    app.config.from_object(config_options[config_name])
    config_options[config_name].init_app(app)
    configure_uploads(app, photos)
    from .auth import auth as authentication_blueprint
    from .main import main as main_blueprint
    #
    app.register_blueprint(authentication_blueprint)
    app.register_blueprint(main_blueprint)
    # Bind extensions to the configured app.
    bootstrap.init_app(app)
    login_manager.init_app(app)
    db.init_app(app)
    mail.init_app(app)
    return app
|
{"/app/auth/views.py": ["/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"]}
|
19,199,324
|
wangechimk/pitch-app
|
refs/heads/main
|
/migrations/versions/70b01ed86089_second_migration.py
|
"""second migration
Revision ID: 70b01ed86089
Revises: c7fd0dd72b48
Create Date: 2021-04-26 22:46:02.031897
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '70b01ed86089'
down_revision = 'c7fd0dd72b48'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('users', 'email',
existing_type=sa.VARCHAR(length=255),
nullable=True)
op.drop_constraint('users_email_key', 'users', type_='unique')
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
# ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore the UNIQUE constraint on users.email
    and make the column NOT NULL again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.create_unique_constraint('users_email_key', 'users', ['email'])
    op.alter_column('users', 'email',
                    existing_type=sa.VARCHAR(length=255),
                    nullable=False)
    # ### end Alembic commands ###
|
{"/app/auth/views.py": ["/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"]}
|
19,199,325
|
wangechimk/pitch-app
|
refs/heads/main
|
/migrations/versions/d64a89476e60_update_profile_migration.py
|
"""update profile migration
Revision ID: d64a89476e60
Revises: 0be36f0d6445
Create Date: 2021-04-26 23:51:45.313989
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd64a89476e60'
down_revision = '0be36f0d6445'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: rename the profile picture column from
    `profile_pic` to `profile_pic_path` (data in the old column is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('profile_pic_path', sa.String(), nullable=True))
    op.drop_column('users', 'profile_pic')
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore `profile_pic` and drop
    `profile_pic_path` (data in the new column is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('profile_pic', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.drop_column('users', 'profile_pic_path')
    # ### end Alembic commands ###
|
{"/app/auth/views.py": ["/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"]}
|
19,199,326
|
wangechimk/pitch-app
|
refs/heads/main
|
/tests/user_test.py
|
import unittest
from app.models import User
class UserTest(unittest.TestCase):
    """Unit tests for the User model."""

    def setUp(self):
        # A fresh User instance is built before every test.
        self.new_user = User(username='wangechi', password='Password')

    def test_password_setter(self):
        # Setting `password` in the constructor should leave a non-None
        # (presumably hashed) value on the instance.
        # Idiom fix: assertIsNotNone gives a clearer failure message than
        # assertTrue(x is not None).
        self.assertIsNotNone(self.new_user.password)
|
{"/app/auth/views.py": ["/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"]}
|
19,199,327
|
wangechimk/pitch-app
|
refs/heads/main
|
/app/main/views.py
|
from flask import Flask, render_template, redirect, url_for, abort, request
from . import main
from flask_login import login_required, current_user
from ..models import User, Pitch, Comment, Upvote,Downvote
from .form import PitchForm, UpdateProfile, CommentForm, UpvoteForm
from .. import db, photos
@main.route('/')
def index():
    """Landing page: all pitches plus per-category listings."""
    all_pitches = Pitch.query.all()
    jobs = Pitch.query.filter_by(category='Job').all()
    events = Pitch.query.filter_by(category='Events').all()
    ads = Pitch.query.filter_by(category='Advertisement').all()
    return render_template(
        'index.html',
        job=jobs,
        event=events,
        pitches=all_pitches,
        advertisement=ads,
    )
@main.route('/new_pitch', methods=['POST', 'GET'])
@login_required
def new_pitch():
    """Create a new pitch from the submitted PitchForm.

    On a valid POST, persists the pitch for the logged-in user and
    redirects to the index; otherwise renders the creation form.
    """
    form = PitchForm()
    if form.validate_on_submit():
        # Cleanup: removed an unused `user_id = current_user` local and a
        # leftover debug print.
        new_pitch = Pitch(title=form.title.data,
                          post=form.post.data,
                          category=form.category.data,
                          user_id=current_user._get_current_object().id)
        new_pitch.save_p()
        return redirect(url_for('main.index'))
    return render_template('create_pitch.html', form=form)
# @main.route('/comment/<int:pitch_id>', methods=['POST', 'GET'])
# @login_required
# def comment(pitch_id):
# form = CommentForm()
# pitch = Pitch.query.get(pitch_id)
# all_comments = Comment.query.filter_by(pitch_id=pitch_id).all()
# if form.validate_on_submit():
# comment = form.comment.data
# pitch_id = pitch_id
# user_id = current_user._get_current_object().id
# new_comment = Comment(comment=comment, user_id=user_id, pitch_id=pitch_id)
#
# new_comment.save_c()
# return redirect(url_for('.comment', pitch_id=pitch_id))
# return render_template('comment.html', form=form, )
@main.route('/user/<name>')
def profile(name):
    """Render the public profile page for the user with this username."""
    found = User.query.filter_by(username=name).first()
    if found is None:
        abort(404)
    return render_template("profile/profile.html", user=found)
@main.route('/user/<name>/updateProfile', methods=['POST', 'GET'])
def updateProfile(name):
    """Edit a user's bio via the UpdateProfile form.

    NOTE(review): unlike the sibling views this route has no
    @login_required and never checks that `name` is the current user,
    so any visitor can edit any profile — confirm and lock down.
    """
    form = UpdateProfile()
    user = User.query.filter_by(username=name).first()
    if user is None:
        abort(404)
    if form.validate_on_submit():
        user.bio = form.bio.data
        user.save_u()
        return redirect(url_for('.profile', name=name))
    return render_template('profile/update.html', form=form)
@main.route('/user/<name>/update/pic', methods=['POST'])
@login_required
def update_pic(name):
    """Save an uploaded profile picture and store its path on the user."""
    user = User.query.filter_by(username=name).first()
    if user is None:
        # Robustness: matches the sibling profile views; previously a
        # missing user caused an AttributeError (HTTP 500).
        abort(404)
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        # BUG FIX: the saved filename was never interpolated into the path
        # (a literal placeholder string was stored instead), leaving
        # `filename` unused.
        path = f'photos/{filename}'
        user.profile_pic_path = path
        db.session.commit()
    return redirect(url_for('main.profile', name=name))
@main.route('/like/<int:id>', methods=['POST', 'GET'])
@login_required
def like(id):
    """Upvote pitch `id` once per user, then return to the index.

    An existing vote is detected by comparing each Upvote's string form
    against "<user_id>:<pitch_id>" — assumes the model's __repr__/__str__
    renders that format (TODO confirm against the Upvote model).
    """
    valid_string = f'{current_user.id}:{id}'
    # Cleanup: removed leftover debug prints and a redundant `else: continue`.
    for vote in Pitch.get_upvotes(id):
        if f'{vote}' == valid_string:
            # Already upvoted by this user: do not record a duplicate.
            return redirect(url_for('main.index', id=id))
    new_vote = Upvote(user=current_user, pitch_id=id)
    new_vote.save()
    return redirect(url_for('main.index', id=id))
@main.route('/dislike/<int:id>', methods=['POST', 'GET'])
@login_required
def dislike(id):
    """Downvote pitch `id` once per user, then return to the index.

    Mirrors like(): an existing vote is detected by comparing each
    Downvote's string form against "<user_id>:<pitch_id>" — assumes the
    model renders that format (TODO confirm).
    """
    valid_string = f'{current_user.id}:{id}'
    # Cleanup: removed leftover debug prints and a redundant `else: continue`.
    for vote in Pitch.get_downvotes(id):
        if f'{vote}' == valid_string:
            # Already downvoted by this user: do not record a duplicate.
            return redirect(url_for('main.index', id=id))
    new_downvote = Downvote(user=current_user, pitch_id=id)
    new_downvote.save()
    return redirect(url_for('main.index', id=id))
@main.route('/comment/<int:pitch_id>', methods=['POST', 'GET'])
@login_required
def comment(pitch_id):
    """Show a pitch with its comments and accept a new comment for it."""
    form = CommentForm()
    pitch = Pitch.query.get(pitch_id)
    all_comments = Comment.query.filter_by(pitch_id=pitch_id).all()
    if form.validate_on_submit():
        posted = Comment(comment=form.comment.data,
                         user_id=current_user._get_current_object().id,
                         pitch_id=pitch_id)
        posted.save_c()
        # Redirect back to this same page so the new comment is listed.
        return redirect(url_for('.comment', pitch_id=pitch_id))
    return render_template('comment.html', form=form, pitch=pitch, all_comments=all_comments)
|
{"/app/auth/views.py": ["/app/__init__.py"], "/app/main/views.py": ["/app/__init__.py"]}
|
19,199,983
|
sebastiengilbert73/gameDeepRL
|
refs/heads/master
|
/arenas/connect4.py
|
import logging
import argparse
import rules.connect4
import simulation.simulator
import random
import architectures.connect4 as arch
import torch
import sys
import math
# Command-line interface: the model checkpoint plus architecture and
# decision-temperature hyperparameters.
parser = argparse.ArgumentParser()
parser.add_argument('NeuralNetworkFilepath', help="The filepath of the neural network")
parser.add_argument('--useCpu', help='Use cpu, even if cuda is available', action='store_true')
parser.add_argument('--conv1NumberOfChannels', help="The number of channels for the 1st convolution. Default: 16", type=int, default=16)
parser.add_argument('--conv2NumberOfChannels', help="The number of channels for the 2nd convolution. Default: 32", type=int, default=32)
parser.add_argument('--dropoutRatio', help="The dropout ratio. Default: 0.5", type=float, default=0.5)
parser.add_argument('--finalDecisionSoftmaxTemperature', help="The softmax temperature for the final decision. Default: 0.0", type=float, default=0.0)
parser.add_argument('--simulationSoftmaxTemperature', help="The softmax temperature for the simulation quick decisions. Default: 1.0", type=float, default=1.0)
parser.add_argument('--numberOfSimulations', help="The number of simulations per position. Default: 30", type=int, default=30)
parser.add_argument('--neuralNetworkPlaysFirst', action='store_true')
parser.add_argument('--displayLegalMoveStatistics', action='store_true')
args = parser.parse_args()

logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')

# Device selection: prefer CUDA unless explicitly disabled or unavailable.
device = 'cpu'
useCuda = not args.useCpu and torch.cuda.is_available()
if useCuda:
    device = 'cuda'
def main():
    """Play an interactive connect-4 match between a human (console input)
    and the loaded neural network, alternating moves until there is a
    winner or a draw."""
    logging.info("arenas/connect4.py main()\tdevice = {}".format(device))
    # Load the neural network
    """neural_net = arch.ConvPredictorDirect(
        conv1_number_of_channels=args.conv1NumberOfChannels,
        conv2_number_of_channels=args.conv2NumberOfChannels,
        dropout_ratio=args.dropoutRatio,
        final_decision_softmax_temperature=args.finalDecisionSoftmaxTemperature,
        simulation_softmax_temperature=args.simulationSoftmaxTemperature
    ).to(device)
    """
    # NOTE(review): the architecture hyperparameters are hard-coded here and
    # ignore the corresponding command-line arguments — confirm intended.
    neural_net = arch.ConvPredictor2Scales(
        16, 32, 16, 0.5
    )
    try:
        neural_net.load_state_dict(torch.load(args.NeuralNetworkFilepath))
    except:
        # NOTE(review): bare except hides the real failure (bad path vs.
        # incompatible state dict); consider catching specific exceptions.
        logging.error("Could not load file '{}'".format(args.NeuralNetworkFilepath))
        sys.exit()
    neural_net.eval()  # inference mode (disables dropout)
    logging.info("Neural network loaded")
    authority = rules.connect4.Authority()
    players = authority.PlayersList()
    # By default the human moves first; --neuralNetworkPlaysFirst swaps sides.
    human_player = players[0]
    neural_net_player = players[1]
    if args.neuralNetworkPlaysFirst:
        human_player = players[1]
        neural_net_player = players[0]
    position = authority.InitialPosition()
    authority.Display(position)
    current_player = players[0]
    winner = None
    while winner is None:
        if current_player == human_player:
            # NOTE(review): the typed column is not validated; a non-integer
            # or out-of-range entry raises and ends the program.
            dropColumn = int(input("Column: "))
            position, winner = authority.MoveWithColumn(position, current_player, dropColumn)
        else:
            legal_move_to_statistics_list = neural_net.LegalMoveStatistics(authority, 42, args.numberOfSimulations, position, current_player)
            if args.displayLegalMoveStatistics:
                print(legal_move_to_statistics_list)
            chosen_move = FinalDecision(legal_move_to_statistics_list, neural_net)
            position, winner = authority.MoveWithMoveArrayCoordinates(position, current_player, chosen_move)
        authority.Display(position)
        current_player = authority.OtherPlayer(current_player)
    print ("winner: {}".format(winner))
def FinalDecision(legal_move_to_statistics_list, neural_network):
    """Pick the move to actually play from simulation statistics.

    Each entry of `legal_move_to_statistics_list` is (move, stats) where
    stats = (wins, draws, losses) counted over the simulations; a move's
    expected value is (wins - losses) / total.

    With a non-positive `neural_network.final_decision_softmax_temperature`
    the highest expected value wins (ties broken uniformly at random);
    otherwise a move is sampled from a softmax over expected values.

    Raises ValueError for an empty candidate set or a non-positive softmax
    normalizer, and RuntimeError if the sampling walk falls through.
    """
    temperature = neural_network.final_decision_softmax_temperature
    if temperature <= 0:  # Hard max
        highest_expected_value = -2.0  # below the minimum possible value (-1)
        chosen_move_coordinates = []
        for (move, stats) in legal_move_to_statistics_list:
            expected_value = (stats[0] - stats[2]) / (stats[0] + stats[1] + stats[2])
            if expected_value > highest_expected_value:
                highest_expected_value = expected_value
                chosen_move_coordinates = [move]
            elif expected_value == highest_expected_value:
                chosen_move_coordinates.append(move)
        if len(chosen_move_coordinates) == 0:
            raise ValueError(
                "arenas.connect4.FinalDecision(): chosen_move_coordinates is empty. legal_move_to_statistics_list = {}".format(
                    legal_move_to_statistics_list))
        return random.choice(chosen_move_coordinates)
    # Softmax sampling
    move_to_expected_value_dic = {move: (stats[0] - stats[2]) / (stats[0] + stats[1] + stats[2]) for
                                  (move, stats) in legal_move_to_statistics_list}
    # Normalize. (Renamed the accumulator: the original shadowed builtin `sum`.)
    exponential_sum = 0
    move_to_choice_probability = {}
    for move, expected_value in move_to_expected_value_dic.items():
        exponential_sum += math.exp(expected_value / temperature)
    if exponential_sum > 0:
        for move, expected_value in move_to_expected_value_dic.items():
            move_to_choice_probability[move] = (math.exp(
                expected_value / temperature)) / exponential_sum
    else:
        raise ValueError(
            "arenas.connect4.FinalDecision(): sum of exponentials ({}) is not > 0. move_to_expected_value_dic = {}".format(
                exponential_sum, move_to_expected_value_dic))
    # Draw a random number and walk the cumulative distribution.
    random_draw = random.random()
    running_sum = 0
    for move, probability in move_to_choice_probability.items():
        running_sum += probability
        if running_sum >= random_draw:
            return move
    raise RuntimeError(
        "arenas.connect4.FinalDecision(): Summed the probabilities without reaching the random number {}. move_to_choice_probability = {}".format(
            random_draw, move_to_choice_probability))
# Script entry point.
if __name__ == '__main__':
    main()
|
{"/arenas/connect4.py": ["/rules/connect4.py", "/simulation/simulator.py", "/architectures/connect4.py"], "/rules/connect4.py": ["/rules/authority.py"], "/architectures/connect4.py": ["/simulation/simulator.py"], "/training/learn_tictactoe.py": ["/rules/tictactoe.py", "/simulation/simulator.py"], "/rules/tictactoe.py": ["/rules/authority.py"]}
|
19,199,984
|
sebastiengilbert73/gameDeepRL
|
refs/heads/master
|
/rules/authority.py
|
import abc
class GameAuthority(abc.ABC):
    """
    Abstract class that holds the rules of the game.

    Subclasses encode positions and moves as arrays and implement the
    legality / winner logic; the concrete helpers at the bottom
    (DisplayGame, OtherPlayer) are game-independent.
    """
    def __init__(self):
        super().__init__()

    @abc.abstractmethod
    def Move(self, currentPositionArr, player, moveArr):
        """Apply moveArr for player. Return (positionArr, winner)."""
        pass

    @abc.abstractmethod
    def MoveWithMoveArrayCoordinates(self, current_position, player, move_array_coordinates):
        """Apply a move given as move-array coordinates. Return (positionArr, winner)."""
        pass

    @abc.abstractmethod
    def Winner(self, positionArr, lastPlayerWhoPlayed):
        """Return the winner of positionArr, if any."""
        pass

    @abc.abstractmethod
    def LegalMovesMask(self, positionArr, player):
        """Return a move-array-shaped mask of the legal moves."""
        pass

    @abc.abstractmethod
    def LegalMoveCoordinates(self, positionArr, player):
        """Return the legal moves as a list of 4-element coordinate lists."""
        pass

    @abc.abstractmethod
    def PositionArrayShape(self):
        pass

    @abc.abstractmethod
    def MoveArrayShape(self):
        pass

    @abc.abstractmethod
    def InitialPosition(self):
        pass

    @abc.abstractmethod
    def SwapPositions(self, positionArr):
        """Return positionArr with the two players' roles exchanged."""
        pass

    @abc.abstractmethod
    def PlayersList(self):
        pass

    """@abc.abstractmethod
    def MoveWithString(self, currentPositionArr, player, dropCoordinatesAsString):
        pass # return (positionArr, winner)
    """

    @abc.abstractmethod
    def Display(self, positionArr):
        pass

    @abc.abstractmethod
    def RaiseAnErrorIfNoLegalMove(self):
        pass

    def DisplayGame(self, positionsList):
        """Display every position of a game in order."""
        for position in positionsList:
            self.Display(position)
            print()

    def OtherPlayer(self, player):
        """Return the opponent of `player`.

        Raises ValueError if `player` is not in PlayersList().
        """
        players = self.PlayersList()
        # Idiom fix: `player not in players` instead of `not player in players`.
        if player not in players:
            raise ValueError("GameAuthority.OtherPlayer(): '{}' is not part of the players list".format(player))
        if player == players[0]:
            return players[1]
        return players[0]
|
{"/arenas/connect4.py": ["/rules/connect4.py", "/simulation/simulator.py", "/architectures/connect4.py"], "/rules/connect4.py": ["/rules/authority.py"], "/architectures/connect4.py": ["/simulation/simulator.py"], "/training/learn_tictactoe.py": ["/rules/tictactoe.py", "/simulation/simulator.py"], "/rules/tictactoe.py": ["/rules/authority.py"]}
|
19,199,985
|
sebastiengilbert73/gameDeepRL
|
refs/heads/master
|
/rules/connect4.py
|
import rules.authority
import numpy as np
import ast
class Authority(rules.authority.GameAuthority):
    """Connect-4 game rules.

    Position encoding: array of shape (2, 1, numberOfRows, numberOfColumns)
    with one binary plane per player ('yellow' = plane 0, 'red' = plane 1);
    a cell holds 1 where that player has a checker, row 0 is the top.
    Move encoding: one-hot array of shape (1, 1, 1, numberOfColumns)
    selecting the column where the checker is dropped.
    """
    def __init__(self, numberOfRows=6, numberOfColumns=7):
        """Create the authority; both dimensions must be >= 4, otherwise no
        4-in-a-row is possible."""
        super().__init__()
        if numberOfColumns < 4 or numberOfRows < 4:
            raise ValueError("Authority.__init__(): The number of rows ({}) and the number of columns ({}) must be at least 4".format(numberOfRows, numberOfColumns))
        self.playersList = ['yellow', 'red']
        self.positionArrayShape = (2, 1, numberOfRows, numberOfColumns)
        self.moveArrayShape = (1, 1, 1, numberOfColumns)
        self.playerToPlaneIndexDic = {'yellow': 0, 'red': 1}
        self.numberOfRows = numberOfRows
        self.numberOfColumns = numberOfColumns

    def PlayersList(self):
        """Return the two player identifiers, first mover first."""
        return self.playersList

    def ThereIs4InARow(self, planeNdx, positionArray):
        """Return True if plane `planeNdx` contains four aligned checkers
        (horizontal, vertical, or either diagonal).

        Scans every cell as the top/left end of each possible line.
        """
        if positionArray.shape != self.positionArrayShape:  # (C, D, H, W)
            raise ValueError("Authority.ThereIs4InARow(): The shape of positionArray ({}) is not {}".format(positionArray.shape, self.positionArrayShape))
        for row in range(self.numberOfRows):
            for column in range(self.numberOfColumns):
                # Left end of a horizontal line
                if column < self.numberOfColumns - 3:
                    thereIsAHorizontalLine = True
                    deltaColumn = 0
                    while deltaColumn < 4 and thereIsAHorizontalLine:
                        if positionArray[planeNdx, 0, row, column + deltaColumn] != 1:
                            thereIsAHorizontalLine = False
                        deltaColumn += 1
                    if thereIsAHorizontalLine:
                        return True
                # Upper end of a vertical line
                if row < self.numberOfRows - 3:
                    thereIsAVerticalLine = True
                    deltaRow = 0
                    while deltaRow < 4 and thereIsAVerticalLine:
                        if positionArray[planeNdx, 0, row + deltaRow, column] != 1:
                            thereIsAVerticalLine = False
                        deltaRow += 1
                    if thereIsAVerticalLine:
                        return True
                # North-West end of a \
                if row < self.numberOfRows - 3 and column < self.numberOfColumns - 3:
                    thereIsABackSlash = True
                    deltaRowColumn = 0
                    while deltaRowColumn < 4 and thereIsABackSlash:
                        if positionArray[planeNdx, 0, row + deltaRowColumn, column + deltaRowColumn] != 1:
                            thereIsABackSlash = False
                        deltaRowColumn += 1
                    if thereIsABackSlash:
                        return True
                # North-East end of a /
                if row < self.numberOfRows - 3 and column >= 3:
                    thereIsASlash = True
                    deltaRowColumn = 0
                    while deltaRowColumn < 4 and thereIsASlash:
                        if positionArray[planeNdx, 0, row + deltaRowColumn, column - deltaRowColumn] != 1:
                            thereIsASlash = False
                        deltaRowColumn += 1
                    if thereIsASlash:
                        return True
        # Otherwise
        return False

    def MoveWithColumn(self, currentPositionArr, player, dropColumn):
        """Drop a checker for `player` in `dropColumn`.

        Returns (newPositionArr, winner) where winner is the player name,
        'draw', or None. Raises ValueError on a bad shape, an out-of-range
        column, or a full column.
        """
        if currentPositionArr.shape != self.positionArrayShape:  # (C, D, H, W)
            raise ValueError("Authority.MoveWithColumn(): The shape of currentPositionArr {} is not {}".format(currentPositionArr.shape, self.positionArrayShape))
        if dropColumn >= self.numberOfColumns:
            raise ValueError("Authority.MoveWithColumn(): dropColumn ({}) is >= self.numberOfColumns ({})".format(dropColumn, self.numberOfColumns))
        topAvailableRow = self.TopAvailableRow(currentPositionArr, dropColumn)
        # Idiom fix: compare to None with `is`, not `==`.
        if topAvailableRow is None:
            raise ValueError(
                "Authority.MoveWithColumn(): Attempt to drop in column {}, while it is already filled".format(
                    dropColumn))
        newPositionArr = np.copy(currentPositionArr)
        newPositionArr[self.playerToPlaneIndexDic[player], 0, topAvailableRow, dropColumn] = 1.0
        winner = self.Winner(newPositionArr, player)
        return newPositionArr, winner

    def TopAvailableRow(self, positionArr, dropColumn):
        """Return the lowest empty row index in `dropColumn` (the checker
        falls to the bottom), or None if the column is already filled."""
        # Check the bottom row
        if positionArr[0, 0, self.numberOfRows - 1, dropColumn] == 0 and \
                positionArr[1, 0, self.numberOfRows - 1, dropColumn] == 0:
            return self.numberOfRows - 1
        highestOneRow = self.numberOfRows - 1
        for row in range(self.numberOfRows - 2, -1, -1):  # Count backward: 4, 3, 2, 1, 0
            if positionArr[0, 0, row, dropColumn] > 0 or \
                    positionArr[1, 0, row, dropColumn] > 0:
                highestOneRow = row
        if highestOneRow == 0:  # The column is already filled
            return None
        else:
            return highestOneRow - 1

    def Move(self, currentPositionArr, player, moveArr):
        """Apply a one-hot move array for `player`; delegates to
        MoveWithColumn(). Raises ValueError if moveArr is not one-hot."""
        if moveArr.shape != self.moveArrayShape:
            raise ValueError("Authority.Move(): moveArr.shape ({}) is not {}".format(moveArr.shape, self.moveArrayShape))
        numberOfOnes = 0
        dropColumn = None
        for column in range(self.numberOfColumns):
            if moveArr[0, 0, 0, column] == 1:
                numberOfOnes += 1
                dropColumn = column
        if numberOfOnes != 1:
            raise ValueError("Authority.Move(): The number of ones in moveArr ({}) is not one".format(numberOfOnes))
        return self.MoveWithColumn(currentPositionArr, player, dropColumn)

    def InitialPosition(self):
        """Return the empty board (all zeros)."""
        initialPosition = np.zeros(self.positionArrayShape)
        return initialPosition

    def MoveArrayShape(self):
        return self.moveArrayShape

    def PositionArrayShape(self):
        return self.positionArrayShape

    def Winner(self, positionArray, lastPlayerWhoPlayed):
        """Return `lastPlayerWhoPlayed` if they made 4-in-a-row, 'draw' if
        the board is full, otherwise None. Only the last mover can have
        just won, so only that player's plane is checked."""
        lastPlayerPlane = self.playerToPlaneIndexDic[lastPlayerWhoPlayed]
        if self.ThereIs4InARow(lastPlayerPlane, positionArray):
            return lastPlayerWhoPlayed
        else:
            # Each occupied cell contributes exactly one nonzero across the
            # two planes, so rows * columns nonzeros means a full board.
            if np.count_nonzero(positionArray) == self.numberOfRows * self.numberOfColumns:  # All spots are occupied
                return 'draw'
            else:
                return None

    def LegalMovesMask(self, positionArray, player):
        """Return a move-shaped int8 mask: 1 for each column whose top cell
        is empty (a checker can still be dropped there), else 0."""
        if positionArray.shape != self.positionArrayShape:
            raise ValueError("Authority.LegalMovesMask(): The shape of positionArray ({}) is not {}".format(
                positionArray.shape, self.positionArrayShape))
        legalMovesMask = np.ones(self.moveArrayShape, dtype=np.int8)  # Initialized with ones, i.e legal moves
        for column in range(self.numberOfColumns):
            if positionArray[0, 0, 0, column] != 0 or positionArray[1, 0, 0, column] != 0:
                legalMovesMask[0, 0, 0, column] = 0
        return legalMovesMask

    def SwapPositions(self, positionArr):
        """Return the position with the two players' planes exchanged."""
        swappedPosition = np.zeros(self.positionArrayShape)
        swappedPosition[[0, 1], :] = positionArr[[1, 0], :]  # Cf. https://www.w3resource.com/python-exercises/numpy/python-numpy-exercise-150.php
        return swappedPosition

    def Display(self, positionArr):
        """Print the board to stdout: 'y' for yellow, 'r' for red, '.' empty."""
        planeNdxToSymbolDic = {0: 'y', 1: 'r'}
        for row in range(self.numberOfRows):
            for column in range(self.numberOfColumns):
                if positionArr[0, 0, row, column] > 0:
                    print('{} '.format(planeNdxToSymbolDic[0]), end='')
                elif positionArr[1, 0, row, column] > 0:
                    print('{} '.format(planeNdxToSymbolDic[1]), end='')
                else:
                    print('. ', end='')
            print('\n')
        print("**************")

    def RaiseAnErrorIfNoLegalMove(self):
        return True

    def MoveWithMoveArrayCoordinates(self, current_position, player, move_array_coordinates):
        """Apply a move given as [c, d, h, w] coordinates; only the column
        (index 3) matters for connect-4."""
        dropColumn = move_array_coordinates[3]
        return self.MoveWithColumn(current_position, player, dropColumn)

    def LegalMoveCoordinates(self, positionArr, player):
        """Return the legal moves as a list of [0, 0, 0, column] lists —
        one entry per column whose top cell is empty."""
        legal_move_coordinates = []
        for columnNdx in range(self.numberOfColumns):
            if positionArr[0, 0, 0, columnNdx] == 0 and positionArr[1, 0, 0, columnNdx] == 0:
                legal_move_coordinates.append([0, 0, 0, columnNdx])
        return legal_move_coordinates
|
{"/arenas/connect4.py": ["/rules/connect4.py", "/simulation/simulator.py", "/architectures/connect4.py"], "/rules/connect4.py": ["/rules/authority.py"], "/architectures/connect4.py": ["/simulation/simulator.py"], "/training/learn_tictactoe.py": ["/rules/tictactoe.py", "/simulation/simulator.py"], "/rules/tictactoe.py": ["/rules/authority.py"]}
|
19,199,986
|
sebastiengilbert73/gameDeepRL
|
refs/heads/master
|
/architectures/connect4.py
|
import torch
import numpy as np
import simulation.simulator
import math
import random
class AuxiliaryRegressorConv(torch.nn.Module):
def __init__(self, input_size_CDHW, output_size, dropout_ratio):
super(AuxiliaryRegressorConv, self).__init__()
self.number_of_inputs = input_size_CDHW[0] * input_size_CDHW[1] * input_size_CDHW[2] * input_size_CDHW[3]
self.dropout = torch.nn.Dropout(p=dropout_ratio)
self.linear = torch.nn.Linear(self.number_of_inputs, output_size)
def forward(self, x):
# x.shape = (N, C, D, H, W)
activation = x.view(-1, self.number_of_inputs)
activation = self.dropout(activation)
outputTsr = torch.clip(self.linear(activation), 0, 1) # outputTsr.shape = (N, output_size)
outputTsr = torch.nn.functional.normalize(outputTsr, p=1, dim=1)
return outputTsr
class AuxiliaryRegressorLinear(torch.nn.Module):
def __init__(self, input_size, output_size, dropout_ratio):
super(AuxiliaryRegressorLinear, self).__init__()
self.number_of_inputs = input_size
self.dropout = torch.nn.Dropout(p=dropout_ratio)
self.linear = torch.nn.Linear(self.number_of_inputs, output_size)
def forward(self, x):
# x.shape = (N, self.number_of_inputs)
activation = self.dropout(x)
outputTsr = torch.clip(self.linear(activation), 0, 1) # outputTsr.shape = (N, output_size)
outputTsr = torch.nn.functional.normalize(outputTsr, p=1, dim=1)
return outputTsr
class ConvPredictor(torch.nn.Module, simulation.simulator.Simulator):
    """Position evaluator for connect-4 with a fully-connected hidden layer.

    Two valid 3x3 convolutions followed by a linear hidden layer, with an
    auxiliary 3-way regressor (presumably win/draw/loss — TODO confirm
    against the training targets) attached after each stage. Also acts as
    a simulation.simulator.Simulator through
    ChooseMoveCoordinatesQuick().
    """
    def __init__(self, conv1_number_of_channels, conv2_number_of_channels,
                 hidden_size, dropout_ratio=0.5, final_decision_softmax_temperature=0.0, simulation_softmax_temperature=1.0):
        super(ConvPredictor, self).__init__()
        self.conv1 = torch.nn.Conv3d(2, conv1_number_of_channels, (1, 3, 3))
        self.conv2 = torch.nn.Conv3d(conv1_number_of_channels, conv2_number_of_channels, (1, 3, 3))
        self.dropout3d = torch.nn.Dropout3d(p=dropout_ratio)
        # 3 * 2 is the spatial size (H=2, W=3) left after two valid 3x3
        # convolutions on the 6x7 board.
        self.fc1 = torch.nn.Linear(conv2_number_of_channels * 3 * 2, hidden_size)
        #self.fc2 = torch.nn.Linear(hidden_size, 3)
        self.dropout = torch.nn.Dropout(p=dropout_ratio)
        self.conv1_number_of_channels = conv1_number_of_channels
        self.conv2_number_of_channels = conv2_number_of_channels
        self.hidden_size = hidden_size
        # One auxiliary 3-way head per stage.
        self.pred1 = AuxiliaryRegressorConv((self.conv1_number_of_channels, 1, 4, 5), 3, dropout_ratio)
        self.pred2 = AuxiliaryRegressorConv((self.conv2_number_of_channels, 1, 2, 3), 3, dropout_ratio)
        self.pred3 = AuxiliaryRegressorLinear(self.hidden_size, 3, dropout_ratio)
        self.final_decision_softmax_temperature = final_decision_softmax_temperature
        self.simulation_softmax_temperature = simulation_softmax_temperature

    def forward(self, x):
        """Return the three auxiliary predictions, one per stage."""
        # x.shape = (N, 2, 1, 6, 7)
        activation1 = torch.nn.functional.relu(self.conv1(x))
        # activation1.shape = (N, c1, 1, 4, 5)
        activation1 = self.dropout3d(activation1)
        activation2 = torch.nn.functional.relu(self.conv2(activation1))
        # activation2.shape = (N, c2, 1, 2, 3)
        activation2_flat = activation2.view(-1, self.conv2_number_of_channels * 3 * 2)
        # activation2_flat.shape = (N, c2 * 3 * 2)
        hidden = torch.nn.functional.relu(self.fc1(activation2_flat))
        # hidden.shape = (N, h)
        regression1 = self.pred1(activation1)
        regression2 = self.pred2(activation2)
        regression3 = self.pred3(hidden)
        return (regression1, regression2, regression3)

    def ChooseMoveCoordinatesQuick(self, authority, position, player):
        """Choose a move for `player` during simulation playouts.

        Evaluates every legal move's resulting position with the last
        auxiliary head and reads prediction[0] - prediction[2] as the
        expected value. An immediately winning move is returned at once;
        otherwise the move is picked by hard max (simulation softmax
        temperature <= 0) or sampled from a softmax over expected values.

        NOTE(review): positions are swapped when player == 'red' —
        presumably the network always evaluates from plane-0's
        perspective; confirm against training.
        """
        legal_move_coordinates = authority.LegalMoveCoordinates(position, player)
        move_to_choice_probability = {}
        other_player = authority.OtherPlayer(player)
        for move_coordinates in legal_move_coordinates:
            #print ("ConvPredictor.ChooseMoveCoordinates(): move_coordinates = {}".format(move_coordinates))
            resulting_position, winner = authority.MoveWithMoveArrayCoordinates(position, player, move_coordinates)
            if player == 'red':
                resulting_position = authority.SwapPositions(resulting_position)
            move_coordinates = tuple(move_coordinates)  # To make it hashable
            if winner == player:
                # Immediate win: no need to evaluate anything else.
                return move_coordinates
            elif winner == other_player:
                move_to_choice_probability[move_coordinates] = -1
            elif winner == 'draw':
                move_to_choice_probability[move_coordinates] = 0
            else:
                resulting_position_tsr = torch.tensor(resulting_position, dtype=torch.float).unsqueeze(0)
                # print ("ConvPredictor.ChooseMoveCoordinates(): resulting_position_tsr = {}".format(resulting_position_tsr))
                # Use only the deepest head's prediction.
                predictionTsr = self.forward(resulting_position_tsr)[-1].squeeze(0)
                expected_value = predictionTsr[0].item() - predictionTsr[2].item()
                move_to_choice_probability[move_coordinates] = expected_value
                #print("ConvPredictor.ChooseMoveCoordinates(): expected_value = {}".format(move_to_choice_probability[move_coordinates]))
        softmax_temperature = self.simulation_softmax_temperature  # Quick decision
        if softmax_temperature <= 0:  # Hard max
            highest_expected_value = -2.0  # below the minimum possible value (-1)
            chosen_move_coordinates = []
            for move, expected_value in move_to_choice_probability.items():
                if expected_value > highest_expected_value:
                    highest_expected_value = expected_value
                    chosen_move_coordinates = [move]
                elif expected_value == highest_expected_value:
                    chosen_move_coordinates.append(move)
            return random.choice(chosen_move_coordinates)
        # print("ConvPredictor.ChooseMoveCoordinates(): Before normalization, move_to_choice_probability = \n{}".format(move_to_choice_probability))
        # Normalize
        # NOTE(review): `sum` shadows the builtin for the rest of this method.
        sum = 0
        for move, expected_value in move_to_choice_probability.items():
            sum += math.exp(expected_value/softmax_temperature)
        for move, expected_value in move_to_choice_probability.items():
            move_to_choice_probability[move] = (math.exp(expected_value/softmax_temperature) )/sum
        #print("ConvPredictor.ChooseMoveCoordinates(): move_to_choice_probability = \n{}".format(move_to_choice_probability))
        # Draw a random number and walk the cumulative distribution.
        random_draw = random.random()
        running_sum = 0
        for move, probability in move_to_choice_probability.items():
            running_sum += probability
            if running_sum >= random_draw:
                return move
        raise RuntimeError("ConvPredictor.ChooseMoveCoordinates(): Summed the probabilities without reaching the random number {}".format(random_draw))

    def SetSimulationSoftmaxTemperature(self, temperature):
        """Set the softmax temperature used by ChooseMoveCoordinatesQuick()."""
        self.simulation_softmax_temperature = temperature
class ConvPredictorDirect(torch.nn.Module, simulation.simulator.Simulator):
    """Position evaluator like ConvPredictor, but without the
    fully-connected hidden layer: only the two convolutional stages, each
    with an auxiliary 3-way regressor head. The deepest head's output is
    used for move selection in ChooseMoveCoordinatesQuick()."""
    def __init__(self, conv1_number_of_channels, conv2_number_of_channels,
                 dropout_ratio=0.5, final_decision_softmax_temperature=0.0, simulation_softmax_temperature=1.0):
        super(ConvPredictorDirect, self).__init__()
        self.conv1 = torch.nn.Conv3d(2, conv1_number_of_channels, (1, 3, 3))
        self.conv2 = torch.nn.Conv3d(conv1_number_of_channels, conv2_number_of_channels, (1, 3, 3))
        self.dropout3d = torch.nn.Dropout3d(p=dropout_ratio)
        self.dropout = torch.nn.Dropout(p=dropout_ratio)
        self.conv1_number_of_channels = conv1_number_of_channels
        self.conv2_number_of_channels = conv2_number_of_channels
        # One auxiliary 3-way head per convolutional stage.
        self.pred1 = AuxiliaryRegressorConv((self.conv1_number_of_channels, 1, 4, 5), 3, dropout_ratio)
        self.pred2 = AuxiliaryRegressorConv((self.conv2_number_of_channels, 1, 2, 3), 3, dropout_ratio)
        self.final_decision_softmax_temperature = final_decision_softmax_temperature
        self.simulation_softmax_temperature = simulation_softmax_temperature

    def forward(self, x):
        """Return the two auxiliary predictions, one per stage."""
        # x.shape = (N, 2, 1, 6, 7) — the original comment said (N, 2, 1, 3, 3),
        # presumably copied from a tic-tac-toe model; the head shapes below
        # match the connect-4 board.
        activation1 = torch.nn.functional.relu(self.conv1(x))
        # activation1.shape = (N, c1, 1, 4, 5)
        activation1 = self.dropout3d(activation1)
        activation2 = torch.nn.functional.relu(self.conv2(activation1))
        # activation2.shape = (N, c2, 1, 2, 3)
        regression1 = self.pred1(activation1)
        regression2 = self.pred2(activation2)
        return (regression1, regression2)

    def ChooseMoveCoordinatesQuick(self, authority, position, player):
        """Choose a move for `player` during simulation playouts; same
        algorithm as ConvPredictor.ChooseMoveCoordinatesQuick() but using
        this model's deepest (second) head.

        NOTE(review): positions are swapped when player == 'red' —
        presumably the network evaluates from plane-0's perspective;
        confirm against training.
        """
        legal_move_coordinates = authority.LegalMoveCoordinates(position, player)
        move_to_choice_probability = {}
        other_player = authority.OtherPlayer(player)
        for move_coordinates in legal_move_coordinates:
            #print ("ConvPredictor.ChooseMoveCoordinates(): move_coordinates = {}".format(move_coordinates))
            resulting_position, winner = authority.MoveWithMoveArrayCoordinates(position, player, move_coordinates)
            if player == 'red':
                resulting_position = authority.SwapPositions(resulting_position)
            move_coordinates = tuple(move_coordinates)  # To make it hashable
            if winner == player:
                # Immediate win: no need to evaluate anything else.
                return move_coordinates
            elif winner == other_player:
                move_to_choice_probability[move_coordinates] = -1
            elif winner == 'draw':
                move_to_choice_probability[move_coordinates] = 0
            else:
                resulting_position_tsr = torch.tensor(resulting_position, dtype=torch.float).unsqueeze(0)
                # print ("ConvPredictor.ChooseMoveCoordinates(): resulting_position_tsr = {}".format(resulting_position_tsr))
                predictionTsr = self.forward(resulting_position_tsr)[-1].squeeze(0)
                expected_value = predictionTsr[0].item() - predictionTsr[2].item()
                move_to_choice_probability[move_coordinates] = expected_value
                #print("ConvPredictor.ChooseMoveCoordinates(): expected_value = {}".format(move_to_choice_probability[move_coordinates]))
        softmax_temperature = self.simulation_softmax_temperature  # Quick decision
        if softmax_temperature <= 0:  # Hard max
            highest_expected_value = -2.0  # below the minimum possible value (-1)
            chosen_move_coordinates = []
            for move, expected_value in move_to_choice_probability.items():
                if expected_value > highest_expected_value:
                    highest_expected_value = expected_value
                    chosen_move_coordinates = [move]
                elif expected_value == highest_expected_value:
                    chosen_move_coordinates.append(move)
            return random.choice(chosen_move_coordinates)
        # print("ConvPredictor.ChooseMoveCoordinates(): Before normalization, move_to_choice_probability = \n{}".format(move_to_choice_probability))
        # Normalize
        # NOTE(review): `sum` shadows the builtin for the rest of this method.
        sum = 0
        for move, expected_value in move_to_choice_probability.items():
            sum += math.exp(expected_value/softmax_temperature)
        for move, expected_value in move_to_choice_probability.items():
            move_to_choice_probability[move] = (math.exp(expected_value/softmax_temperature) )/sum
        # Draw a random number and walk the cumulative distribution.
        random_draw = random.random()
        running_sum = 0
        for move, probability in move_to_choice_probability.items():
            running_sum += probability
            if running_sum >= random_draw:
                return move
        raise RuntimeError("ConvPredictorDirect.ChooseMoveCoordinates(): Summed the probabilities without reaching the random number {}".format(random_draw))

    def SetSimulationSoftmaxTemperature(self, temperature):
        """Set the softmax temperature used by ChooseMoveCoordinatesQuick()."""
        self.simulation_softmax_temperature = temperature
class ConvPredictor2Scales(torch.nn.Module, simulation.simulator.Simulator):
    """Two-scale convolutional predictor of (P(win), P(draw), P(loss)).

    Branch 'a' stacks two 3x3 convolutions; branch 'b' applies a single
    dilated 3x3 convolution, so the board is analyzed at two receptive-field
    scales. Auxiliary regressors emit a 3-value prediction from each
    intermediate activation; the LAST element of forward()'s output tuple is
    the prediction from the concatenated flattened features and is the one
    used for move selection.
    """

    def __init__(self, conv1a_number_of_channels, conv2a_number_of_channels, conv1b_number_of_channels,
                 dropout_ratio=0.5, final_decision_softmax_temperature=0.0, simulation_softmax_temperature=1.0):
        super(ConvPredictor2Scales, self).__init__()
        self.conv1a = torch.nn.Conv3d(2, conv1a_number_of_channels, (1, 3, 3))
        self.conv2a = torch.nn.Conv3d(conv1a_number_of_channels, conv2a_number_of_channels, (1, 3, 3))
        self.conv1b = torch.nn.Conv3d(2, conv1b_number_of_channels, (1, 3, 3), dilation=2)
        self.dropout3d = torch.nn.Dropout3d(p=dropout_ratio)
        self.dropout = torch.nn.Dropout(p=dropout_ratio)
        self.conv1a_number_of_channels = conv1a_number_of_channels
        self.conv2a_number_of_channels = conv2a_number_of_channels
        self.conv1b_number_of_channels = conv1b_number_of_channels
        # Both branches end with a 2 x 3 spatial map (see forward()).
        self.flattened_activation_size = self.conv2a_number_of_channels * 2 * 3 + self.conv1b_number_of_channels * 2 * 3
        self.pred1a = AuxiliaryRegressorConv((self.conv1a_number_of_channels, 1, 4, 5), 3, dropout_ratio)
        self.pred2a = AuxiliaryRegressorConv((self.conv2a_number_of_channels, 1, 2, 3), 3, dropout_ratio)
        self.pred1b = AuxiliaryRegressorConv((self.conv1b_number_of_channels, 1, 2, 3), 3, dropout_ratio)
        self.pred_flattened = AuxiliaryRegressorLinear(self.flattened_activation_size, 3, dropout_ratio)
        # <= 0 means hard-max in the corresponding decision.
        self.final_decision_softmax_temperature = final_decision_softmax_temperature
        self.simulation_softmax_temperature = simulation_softmax_temperature

    def forward(self, x):
        # The auxiliary-regressor shapes declared in __init__ imply
        # x.shape = (N, 2, 1, 6, 7) — a Connect-4 board — not (N, 2, 1, 3, 3)
        # as a previous comment claimed (6-3+1=4, 7-3+1=5 for conv1a).
        activation1a = torch.nn.functional.relu(self.conv1a(x))
        # activation1a.shape = (N, c1a, 1, 4, 5)
        activation1a = self.dropout3d(activation1a)
        activation2a = torch.nn.functional.relu(self.conv2a(activation1a))
        # activation2a.shape = (N, c2a, 1, 2, 3)
        activation1b = torch.nn.functional.relu(self.conv1b(x))
        # activation1b.shape = (N, c1b, 1, 2, 3)  (dilated 3x3: effective 5x5 kernel)
        flattened_activation_2a = activation2a.view(-1, self.conv2a_number_of_channels * 2 * 3)
        flattened_activation_1b = activation1b.view(-1, self.conv1b_number_of_channels * 2 * 3)
        flattened_activation = torch.cat([flattened_activation_2a, flattened_activation_1b], dim=1)
        regression1a = self.pred1a(activation1a)
        regression2a = self.pred2a(activation2a)
        regression1b = self.pred1b(activation1b)
        regression_flattened = self.pred_flattened(flattened_activation)
        return (regression1a, regression2a, regression1b, regression_flattened)

    def ChooseMoveCoordinatesQuick(self, authority, position, player):
        """Choose a move with one forward pass per legal move (no tree search).

        Each legal move is scored by P(win) - P(loss) predicted for the
        resulting position. With simulation_softmax_temperature <= 0 a best
        move is returned (ties broken at random); otherwise a move is sampled
        from a softmax over the expected values.
        """
        legal_move_coordinates = authority.LegalMoveCoordinates(position, player)
        move_to_choice_probability = {}
        other_player = authority.OtherPlayer(player)
        for move_coordinates in legal_move_coordinates:
            resulting_position, winner = authority.MoveWithMoveArrayCoordinates(position, player, move_coordinates)
            if player == 'red':
                # The network always evaluates positions from one side's
                # viewpoint; 'red' positions are swapped into that viewpoint.
                resulting_position = authority.SwapPositions(resulting_position)
            move_coordinates = tuple(move_coordinates)  # To make it hashable
            if winner == player:
                return move_coordinates  # Immediate win: take it
            elif winner == other_player:
                move_to_choice_probability[move_coordinates] = -1
            elif winner == 'draw':
                move_to_choice_probability[move_coordinates] = 0
            else:
                resulting_position_tsr = torch.tensor(resulting_position, dtype=torch.float).unsqueeze(0)
                predictionTsr = self.forward(resulting_position_tsr)[-1].squeeze(0)
                expected_value = predictionTsr[0].item() - predictionTsr[2].item()
                move_to_choice_probability[move_coordinates] = expected_value
        softmax_temperature = self.simulation_softmax_temperature  # Quick decision
        if softmax_temperature <= 0:  # Hard max
            highest_expected_value = -2.0  # Below the minimum possible value of -1
            chosen_move_coordinates = []
            for move, expected_value in move_to_choice_probability.items():
                if expected_value > highest_expected_value:
                    highest_expected_value = expected_value
                    chosen_move_coordinates = [move]
                elif expected_value == highest_expected_value:
                    chosen_move_coordinates.append(move)
            return random.choice(chosen_move_coordinates)
        # Softmax sampling: normalize exp(value / T) into probabilities.
        exponentials_sum = 0.0  # renamed from 'sum', which shadowed the builtin
        for move, expected_value in move_to_choice_probability.items():
            exponentials_sum += math.exp(expected_value / softmax_temperature)
        for move, expected_value in move_to_choice_probability.items():
            move_to_choice_probability[move] = math.exp(expected_value / softmax_temperature) / exponentials_sum
        # Draw a random number and walk the cumulative distribution.
        random_draw = random.random()
        running_sum = 0.0
        last_move = None
        for move, probability in move_to_choice_probability.items():
            running_sum += probability
            last_move = move
            if running_sum >= random_draw:
                return move
        # Bug fix: floating-point rounding can leave running_sum marginally
        # below 1.0; fall back to the last move instead of raising (the old
        # RuntimeError also misnamed this method as ChooseMoveCoordinates()).
        return last_move

    def SetSimulationSoftmaxTemperature(self, temperature):
        """Set the softmax temperature for simulation-time move sampling (<= 0: hard max)."""
        self.simulation_softmax_temperature = temperature
|
{"/arenas/connect4.py": ["/rules/connect4.py", "/simulation/simulator.py", "/architectures/connect4.py"], "/rules/connect4.py": ["/rules/authority.py"], "/architectures/connect4.py": ["/simulation/simulator.py"], "/training/learn_tictactoe.py": ["/rules/tictactoe.py", "/simulation/simulator.py"], "/rules/tictactoe.py": ["/rules/authority.py"]}
|
19,199,987
|
sebastiengilbert73/gameDeepRL
|
refs/heads/master
|
/training/learn_tictactoe.py
|
import logging
import argparse
import torch
from torch.utils.data import Dataset, DataLoader
import rules.tictactoe
import simulation.simulator
import random
import architectures.tictactoe as arch
import os
import copy
# Command-line configuration for the tic-tac-toe training script.
parser = argparse.ArgumentParser()
parser.add_argument('--useCpu', help='Use cpu, even if cuda is available', action='store_true')
parser.add_argument('--conv1NumberOfChannels', help="The number of channels for the 1st convolution. Default: 16", type=int, default=16)
parser.add_argument('--conv2NumberOfChannels', help="The number of channels for the 2nd convolution. Default: 32", type=int, default=32)
parser.add_argument('--hiddenSize', help="The size of the hidden layer. Default: 64", type=int, default=64)
parser.add_argument('--dropoutRatio', help="The dropout ratio. Default: 0.5", type=float, default=0.5)
parser.add_argument('--learningRate', help='The learning rate. Default: 0.0001', type=float, default=0.0001)
parser.add_argument('--weightDecay', help="The weight decay. Default: 0.0001", type=float, default=0.0001)
parser.add_argument('--numberOfEpochs', help='Number of epochs. Default: 200', type=int, default=200)
parser.add_argument('--numberOfTrainingPositions', help="The number of positions for training. Default: 1000", type=int, default=1000)
parser.add_argument('--numberOfSimulations', help="The number of simulations per position. Default: 100", type=int, default=100)
parser.add_argument('--minibatchSize', help='The minibatch size. Default: 16', type=int, default=16)
parser.add_argument('--outputDirectory', help="The directory where the output data will be written. Default: './outputs'", default='./outputs')
parser.add_argument('--modelFilepathPrefix', help="The model filepath prefix. Default: './outputs/ConvPredictor_'", default='./outputs/ConvPredictor_')
parser.add_argument('--faceOffNumberOfSimulations', help="When playing against a random player, the number of simulations per position. Default: 10", type=int, default=10)
args = parser.parse_args()
# Timestamped DEBUG-level logging for the whole run.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
# Select the compute device: CUDA when available, unless --useCpu is given.
device = 'cpu'
useCuda = not args.useCpu and torch.cuda.is_available()
if useCuda:
    device = 'cuda'
class PositionStats(Dataset):
    """Dataset of (starting position, Monte-Carlo outcome statistics) pairs.

    Each item pairs a board position with the (win, draw, loss) rates
    observed over number_of_simulations rollouts from that position, where
    the opponent simulator plays the starting side 'O'.
    """
    def __init__(self, player_simulator, opponent_simulator, number_of_positions,
                 maximum_number_of_moves=9, number_of_simulations=100):
        super().__init__()
        self.player_simulator = player_simulator
        self.opponent_simulator = opponent_simulator
        # List of (position array, (win_rate, draw_rate, loss_rate)) pairs.
        self.position_stats_pairs = []
        self.authority = rules.tictactoe.Authority()
        self.maximum_number_of_moves = maximum_number_of_moves
        random_player = simulation.simulator.RandomSimulator()
        authority = rules.tictactoe.Authority()
        for positionNdx in range(number_of_positions):
            # Generate a game with purely random play
            #positionsList, winner = self.player_simulator.SimulateAsymmetricGame(
            #    self.authority, self.opponent_simulator, self.maximum_number_of_moves)
            positionsList, winner = random_player.SimulateGame(authority, maximum_number_of_moves)
            # Select a random prefix of the game; its last position is kept
            startNdx = random.randint(0, len(positionsList) - 2)
            positionsList = positionsList[: startNdx + 1]
            # NOTE(review): the parity test presumably normalizes every stored
            # position to the same side-to-move viewpoint (swapping planes when
            # the other side is to move) -- confirm against SwapPositions usage.
            if len(positionsList) % 2 == 0:
                starting_position = positionsList[-1]
            else:
                starting_position = self.authority.SwapPositions(positionsList[-1])
            # Run simulations from the kept position; 'O' (the opponent) starts
            number_of_wins = 0
            number_of_draws = 0
            number_of_losses = 0
            for simulationNdx in range(number_of_simulations):
                sim_positions, sim_winner = self.opponent_simulator.SimulateAsymmetricGame(
                    self.authority, self.player_simulator, self.maximum_number_of_moves,
                    starting_position, starting_player='O')
                if sim_winner == 'X':
                    number_of_wins += 1
                elif sim_winner == 'draw':
                    number_of_draws += 1
                else:
                    number_of_losses += 1
            self.position_stats_pairs.append((starting_position,
                (number_of_wins/number_of_simulations, number_of_draws/number_of_simulations, number_of_losses/number_of_simulations)))

    def __len__(self):
        return len(self.position_stats_pairs)

    def __getitem__(self, idx):
        # Returns (position tensor, target (win, draw, loss) rates tensor).
        position, stats = self.position_stats_pairs[idx]
        return (torch.tensor(position, dtype=torch.float), torch.tensor([stats[0], stats[1], stats[2]]))
def main():
    """Train a ConvPredictorDirect on tic-tac-toe by iterated self-play.

    Runs a fixed number of superepochs. Within each one, the network is
    trained to regress Monte-Carlo (win, draw, loss) statistics, evaluated
    against a random player, and then the datasets are regenerated with the
    freshly trained network acting as both player and (tempered) opponent.
    """
    logging.info("learn_tictactoe.py main()")
    authority = rules.tictactoe.Authority()
    # The first datasets are generated by purely random play.
    opponent_simulator = simulation.simulator.RandomSimulator()
    player_simulator = simulation.simulator.RandomSimulator()
    # Create a neural network
    neural_net = arch.ConvPredictorDirect(
        conv1_number_of_channels=args.conv1NumberOfChannels,
        conv2_number_of_channels=args.conv2NumberOfChannels,
        #hidden_size=args.hiddenSize,
        dropout_ratio=args.dropoutRatio,
        final_decision_softmax_temperature=0.0,
        simulation_softmax_temperature=1.0
    ).to(device)
    logging.info("Creating training and validation datasets...")
    training_dataset = PositionStats(
        player_simulator=player_simulator,
        opponent_simulator=opponent_simulator,
        number_of_positions=args.numberOfTrainingPositions,
        maximum_number_of_moves=9,
        number_of_simulations=args.numberOfSimulations
    )
    logging.info("Finished creating training dataset")
    number_of_validation_positions = int(0.25 * args.numberOfTrainingPositions)
    validation_dataset = PositionStats(
        player_simulator=player_simulator,
        opponent_simulator=opponent_simulator,
        number_of_positions=number_of_validation_positions,
        maximum_number_of_moves=9,
        number_of_simulations=args.numberOfSimulations
    )
    logging.info("Finished creating validation dataset")
    # Create the data loaders
    training_loader = torch.utils.data.DataLoader(training_dataset, batch_size=args.minibatchSize,
                                                  shuffle=True, num_workers=2)
    validation_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=args.minibatchSize,
                                                    shuffle=True, num_workers=2)
    # Create the optimizer
    optimizer = torch.optim.Adam(neural_net.parameters(), lr=args.learningRate, betas=(0.5, 0.999),
                                 weight_decay=args.weightDecay)
    # Loss function
    lossFcn = torch.nn.MSELoss()
    # Output monitoring file
    # NOTE(review): this file is never closed; line buffering plus process
    # exit flushes it, but a context manager would be cleaner.
    epochLossFile = open(os.path.join(args.outputDirectory, 'epochLoss.csv'), "w",
                         buffering=1)  # Flush the buffer at each line
    epochLossFile.write("epoch,trainingLoss,validationLoss\n")
    number_of_superepochs = 10
    for superepoch in range(1, number_of_superepochs + 1):
        logging.info ("****** Superepoch {} ******".format(superepoch))
        lowest_validation_loss = 1.0e9
        for epoch in range(1, args.numberOfEpochs + 1):
            #print ("epoch {}".format(epoch))
            # Set the neural network to training mode
            neural_net.train()
            loss_sum = 0
            for starting_position_tsr, training_target_stats_tsr in training_loader:
                print('.', end='', flush=True)
                starting_position_tsr, training_target_stats_tsr = starting_position_tsr.to(device), training_target_stats_tsr.to(device)
                optimizer.zero_grad()
                prediction_tsr = neural_net(starting_position_tsr)
                # The network returns a tuple of auxiliary predictions;
                # the first two heads are trained with equal weight.
                loss_0 = lossFcn(prediction_tsr[0], training_target_stats_tsr)
                loss_1 = lossFcn(prediction_tsr[1], training_target_stats_tsr)
                #loss_2 = lossFcn(prediction_tsr[2], training_target_stats_tsr)
                loss = 0.3333 * loss_0 + 0.3333 * loss_1 #+ 0.3333 * loss_2
                loss.backward()
                optimizer.step()
                loss_sum += loss.item()
            # NOTE(review): divides by the number of samples, not the number
            # of minibatches -- the reported value is scaled accordingly.
            training_loss = loss_sum/training_dataset.__len__()
            print(' ', end='', flush=True)
            # Validation
            with torch.no_grad():
                neural_net.eval()
                validation_loss_sum = 0
                for starting_position_tsr, validation_target_stats_tsr in validation_loader:
                    starting_position_tsr, validation_target_stats_tsr = starting_position_tsr.to(device), validation_target_stats_tsr.to(device)
                    prediction_tsr = neural_net(starting_position_tsr)
                    #loss = lossFcn(prediction_tsr[2], validation_target_stats_tsr)
                    loss = lossFcn(prediction_tsr[1], validation_target_stats_tsr)
                    validation_loss_sum += loss.item()
                validation_loss = validation_loss_sum/validation_dataset.__len__()
            # Checkpoint the best model, but only during the last superepoch
            if superepoch == number_of_superepochs and validation_loss < lowest_validation_loss:
                lowest_validation_loss = validation_loss
                model_filepath = args.modelFilepathPrefix + str(args.conv1NumberOfChannels) + '_' + str(
                    args.conv2NumberOfChannels) + '_' + str(args.hiddenSize) + '_' + str(
                    args.dropoutRatio) + '_' + str(superepoch) + '_' + "{:.4f}".format(validation_loss) + '.pth'
                torch.save(neural_net.state_dict(), model_filepath)
            if epoch % 50 == 1 or epoch == args.numberOfEpochs:
                print('\n')
                logging.info("Epoch {}: training_loss = {:.6f} validation_loss = {:.6f}".format(epoch, training_loss, validation_loss))
            epochLossFile.write("{},{},{}\n".format((superepoch - 1) * args.numberOfEpochs + (epoch - 1), training_loss, validation_loss))
        print("")
        # Play against a random player
        logging.info("Playing against a random player...")
        #neural_net.SetSoftmaxTemperature(1.0)
        games_list, number_of_wins, number_of_draws, number_of_losses = PlayAgainstRandomPlayer(
            neural_net, 20, authority
        )
        logging.info("Superepoch {}: Against a random player: number_of_wins = {} number_of_draws = {} number_of_losses = {}".format(superepoch, number_of_wins, number_of_draws, number_of_losses))
        #DisplayLostGame(authority, games_list)
        # Recompute the datasets with the freshly trained network.
        # The opponent copy gets a decreasing softmax temperature so its play
        # sharpens as training progresses.
        player_simulator = copy.deepcopy(neural_net)
        #player_simulator.SetSoftmaxTemperature(2.0)
        #opponent_simulator = simulation.simulator.RandomSimulator() # Could be another copy of neural_net, with a different softmax temperature
        opponent_simulator = copy.deepcopy(neural_net)
        opponent_simulation_softmax_temperature = 0.3 * (number_of_superepochs - superepoch) + 1.0
        opponent_simulator.SetSimulationSoftmaxTemperature(opponent_simulation_softmax_temperature)
        logging.info("Creating training and validation datasets... opponent_simulation_softmax_temperature = {}".format(opponent_simulation_softmax_temperature))
        # The second-to-last superepoch generates a 10x larger dataset.
        number_of_training_positions = args.numberOfTrainingPositions
        if superepoch == number_of_superepochs - 1:
            number_of_training_positions = 10 * args.numberOfTrainingPositions
        number_of_validation_positions = int(0.25 * number_of_training_positions)
        training_dataset = PositionStats(
            player_simulator=player_simulator,
            opponent_simulator=opponent_simulator,
            number_of_positions=number_of_training_positions,
            maximum_number_of_moves=9,
            number_of_simulations=args.numberOfSimulations
        )
        logging.info("Finished creating training dataset")
        validation_dataset = PositionStats(
            player_simulator=player_simulator,
            opponent_simulator=opponent_simulator,
            number_of_positions=number_of_validation_positions,
            maximum_number_of_moves=9,
            number_of_simulations=args.numberOfSimulations
        )
        logging.info("Finished creating validation dataset")
        training_loader = torch.utils.data.DataLoader(training_dataset, batch_size=args.minibatchSize,
                                                      shuffle=True, num_workers=2)
        validation_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=args.minibatchSize,
                                                        shuffle=True, num_workers=2)
        # Save a per-superepoch snapshot of the model
        model_filepath = args.modelFilepathPrefix + str(args.conv1NumberOfChannels) + '_' + str(args.conv2NumberOfChannels) + '_' + str(args.hiddenSize) + '_' + str(args.dropoutRatio) + '_' + str(superepoch) + '.pth'
        torch.save(neural_net.state_dict(), model_filepath)
def PlayAgainstRandomPlayer(player_simulator, number_of_games, authority):
    """Face off player_simulator against a random simulator.

    The two simulators alternate which one starts; outcomes are tallied from
    'X''s point of view and one character per game is printed as progress.
    Returns (games_list, number_of_wins, number_of_draws, number_of_losses).
    """
    random_simulator = simulation.simulator.RandomSimulator()
    games_list = []
    wins = 0
    draws = 0
    losses = 0
    for game_index in range(number_of_games):
        if game_index % 2 == 0:
            # The learned player starts, playing 'X'
            positionsList, winner = player_simulator.SimulateAsymmetricGameMonteCarlo(
                authority=authority,
                other_player_simulator=random_simulator,
                maximum_number_of_moves=9,
                number_of_simulations=args.faceOffNumberOfSimulations,
                starting_position=None,
                starting_player='X'
            )
        else:
            # The random player starts, playing 'O'
            positionsList, winner = random_simulator.SimulateAsymmetricGameMonteCarlo(
                authority=authority,
                other_player_simulator=player_simulator,
                maximum_number_of_moves=9,
                number_of_simulations=args.faceOffNumberOfSimulations,
                starting_position=None,
                starting_player='O'
            )
        games_list.append((positionsList, winner))
        if winner == 'X':
            wins += 1
            display_character = 'X'
        elif winner == 'O':
            losses += 1
            display_character = 'O'
        else:
            draws += 1
            display_character = '-'
        print('{}'.format(display_character), end='', flush=True)
    print()
    return games_list, wins, draws, losses
def DisplayLostGame(authority, games_list):
    """Log and display the first game in games_list that 'O' (the opponent) won."""
    for game in games_list:
        if game[1] != 'O':
            continue
        logging.info("Lost game:")
        authority.DisplayGame(game[0])
        break
# Script entry point: only run training when executed directly.
if __name__ == '__main__':
    main()
|
{"/arenas/connect4.py": ["/rules/connect4.py", "/simulation/simulator.py", "/architectures/connect4.py"], "/rules/connect4.py": ["/rules/authority.py"], "/architectures/connect4.py": ["/simulation/simulator.py"], "/training/learn_tictactoe.py": ["/rules/tictactoe.py", "/simulation/simulator.py"], "/rules/tictactoe.py": ["/rules/authority.py"]}
|
19,199,988
|
sebastiengilbert73/gameDeepRL
|
refs/heads/master
|
/simulation/simulator.py
|
import abc
import math
import random

import numpy as np
class Simulator(abc.ABC):
    """Base class for game-playing agents that choose moves by simulation.

    Subclasses implement ChooseMoveCoordinatesQuick(), a fast (possibly
    stochastic) move choice used for rollouts; this base class builds
    Monte-Carlo move selection and full-game simulation on top of it.
    """

    def __init__(self, final_decision_softmax_temperature=0.0):
        super().__init__()
        # Temperature used by ChooseMoveCoordinatesMonteCarlo();
        # <= 0 means hard-max (always pick a best move).
        self.final_decision_softmax_temperature = final_decision_softmax_temperature

    @abc.abstractmethod
    def ChooseMoveCoordinatesQuick(self, authority, position, player):
        """Return move-array coordinates for 'player' in 'position' without deep search."""
        pass

    def ChooseMoveCoordinatesMonteCarlo(self, authority, maximum_number_of_moves, number_of_simulations,
                                        starting_position, player):
        """Choose a move from Monte-Carlo rollout statistics of every legal move.

        Each legal move is scored by its expected value
        (wins - losses) / simulations. With a non-positive temperature the
        best move is returned (ties broken at random); otherwise a move is
        sampled from a softmax over the expected values.
        """
        legal_move_to_statistics_list = self.LegalMoveStatistics(authority, maximum_number_of_moves,
                                                                 number_of_simulations, starting_position,
                                                                 player)
        if self.final_decision_softmax_temperature <= 0:  # Hard max
            highest_expected_value = -2.0  # Below the minimum possible expected value of -1
            chosen_move_coordinates = []
            for (move, stats) in legal_move_to_statistics_list:
                expected_value = (stats[0] - stats[2])/(stats[0] + stats[1] + stats[2])
                if expected_value > highest_expected_value:
                    highest_expected_value = expected_value
                    chosen_move_coordinates = [move]
                elif expected_value == highest_expected_value:
                    chosen_move_coordinates.append(move)
            if len(chosen_move_coordinates) == 0:
                raise ValueError(
                    "Simulator.ChooseMoveCoordinatesMonteCarlo(): chosen_move_coordinates is empty. legal_move_to_statistics_list = {}".format(
                        legal_move_to_statistics_list))
            return random.choice(chosen_move_coordinates)
        # Softmax over expected values
        move_to_expected_value_dic = {move: (stats[0] - stats[2])/(stats[0] + stats[1] + stats[2]) for (move, stats) in legal_move_to_statistics_list}
        # Normalize exp(value / T) into a probability distribution.
        # Bug fix: 'math' was used here but never imported at module level,
        # so any positive temperature raised NameError; 'import math' added.
        exponentials_sum = 0.0  # renamed from 'sum', which shadowed the builtin
        move_to_choice_probability = {}
        for move, expected_value in move_to_expected_value_dic.items():
            exponentials_sum += math.exp(expected_value / self.final_decision_softmax_temperature)
        if exponentials_sum > 0:
            for move, expected_value in move_to_expected_value_dic.items():
                move_to_choice_probability[move] = math.exp(expected_value / self.final_decision_softmax_temperature) / exponentials_sum
        else:
            raise ValueError(
                "Simulator.ChooseMoveCoordinatesMonteCarlo(): sum of exponentials ({}) is not > 0. move_to_expected_value_dic = {}".format(exponentials_sum, move_to_expected_value_dic))
        # Sample a move from the cumulative distribution.
        random_draw = random.random()
        running_sum = 0.0
        last_move = None
        for move, probability in move_to_choice_probability.items():
            running_sum += probability
            last_move = move
            if running_sum >= random_draw:
                return move
        # Bug fix: floating-point rounding can leave running_sum marginally
        # below 1.0; fall back to the last move instead of raising.
        return last_move

    def SimulateGame(self, authority, maximum_number_of_moves, starting_position=None,
                     player=None):
        """Roll out one game using ChooseMoveCoordinatesQuick() for both sides.

        Returns (positionsList, winner); winner is None when the move cap is
        reached without a decision.
        """
        if starting_position is None:
            starting_position = authority.InitialPosition()
        players = authority.PlayersList()
        if player is None:
            player = players[0]
        winner = None
        number_of_moves = 0
        position = starting_position
        positionsList = [position]
        while winner is None and number_of_moves < maximum_number_of_moves:
            move_coordinates = self.ChooseMoveCoordinatesQuick(authority, position, player)
            position, winner = authority.MoveWithMoveArrayCoordinates(position, player, move_coordinates)
            number_of_moves += 1
            positionsList.append(position)
            player = authority.OtherPlayer(player)
        return positionsList, winner

    def LegalMoveStatistics(self, authority, maximum_number_of_moves, number_of_simulations,
                            starting_position, player):
        """Return [(move_coordinates, (win, draw, loss) rates)] for every legal move.

        Moves that immediately end the game are scored without simulation;
        otherwise number_of_simulations rollouts are played from the resulting
        position with the opponent to move. Rates are counts divided by
        number_of_simulations.
        """
        legal_moves_coords = authority.LegalMoveCoordinates(starting_position, player)
        # len(legal_moves_coords) = number_of_legal_moves; each coords is a 4-element list
        if len(legal_moves_coords) == 0:
            return []
        other_player = authority.OtherPlayer(player)  # loop-invariant, hoisted
        legal_move_to_statistics_list = []
        for move_coordinates in legal_moves_coords:
            number_of_wins = 0
            number_of_draws = 0
            number_of_losses = 0
            position_after_candidate_move, winner = authority.MoveWithMoveArrayCoordinates(
                starting_position, player, move_coordinates)
            if winner == player:
                legal_move_to_statistics_list.append((move_coordinates, (number_of_simulations, 0, 0)))
            elif winner == other_player:
                legal_move_to_statistics_list.append((move_coordinates, (0, 0, number_of_simulations)))
            elif winner == 'draw':
                legal_move_to_statistics_list.append((move_coordinates, (0, number_of_simulations, 0)))
            else:  # None: the game is not finished
                for simulationNdx in range(number_of_simulations):
                    positionsList, winner = self.SimulateGame(authority, maximum_number_of_moves,
                                                              position_after_candidate_move,
                                                              other_player)
                    if winner == player:
                        number_of_wins += 1
                    elif winner == other_player:
                        number_of_losses += 1
                    else:
                        number_of_draws += 1
                legal_move_to_statistics_list.append((move_coordinates, (number_of_wins, number_of_draws, number_of_losses)))
        # Normalize counts into rates
        legal_move_to_normalized_stats_list = []
        for (move, stats) in legal_move_to_statistics_list:
            normalized_stats = (stats[0]/number_of_simulations, stats[1]/number_of_simulations, stats[2]/number_of_simulations)
            legal_move_to_normalized_stats_list.append((move, normalized_stats))
        return legal_move_to_normalized_stats_list

    def SimulateAsymmetricGame(self, authority, other_player_simulator,
                               maximum_number_of_moves, starting_position=None,
                               starting_player=None):
        """Roll out one game: self plays starting_player, other_player_simulator plays the rest.

        Both simulators use their quick (non-Monte-Carlo) move choice.
        Returns (positionsList, winner).
        """
        if starting_position is None:
            starting_position = authority.InitialPosition()
        if starting_player is None:
            players = authority.PlayersList()
            starting_player = players[0]
        winner = None
        number_of_moves = 0
        position = starting_position
        current_player = starting_player
        positionsList = [position]
        while winner is None and number_of_moves < maximum_number_of_moves:
            if current_player == starting_player:
                move_coordinates = self.ChooseMoveCoordinatesQuick(authority, position, current_player)
            else:
                move_coordinates = other_player_simulator.ChooseMoveCoordinatesQuick(authority, position, current_player)
            position, winner = authority.MoveWithMoveArrayCoordinates(position, current_player, move_coordinates)
            number_of_moves += 1
            positionsList.append(position)
            current_player = authority.OtherPlayer(current_player)
        return positionsList, winner

    def SimulateAsymmetricGameMonteCarlo(self, authority, other_player_simulator,
                                         maximum_number_of_moves, number_of_simulations,
                                         starting_position=None, starting_player=None):
        """Like SimulateAsymmetricGame(), but both sides use Monte-Carlo move choice."""
        if starting_position is None:
            starting_position = authority.InitialPosition()
        if starting_player is None:
            players = authority.PlayersList()
            starting_player = players[0]
        winner = None
        number_of_moves = 0
        position = starting_position
        current_player = starting_player
        positionsList = [position]
        while winner is None and number_of_moves < maximum_number_of_moves:
            if current_player == starting_player:
                move_coordinates = self.ChooseMoveCoordinatesMonteCarlo(authority, maximum_number_of_moves,
                                                                        number_of_simulations,
                                                                        position, current_player)
            else:
                move_coordinates = other_player_simulator.ChooseMoveCoordinatesMonteCarlo(authority,
                                                                                          maximum_number_of_moves,
                                                                                          number_of_simulations,
                                                                                          position, current_player)
            position, winner = authority.MoveWithMoveArrayCoordinates(position, current_player, move_coordinates)
            number_of_moves += 1
            positionsList.append(position)
            current_player = authority.OtherPlayer(current_player)
        return positionsList, winner

    def SetFinalDecisionSoftmaxTemperature(self, temperature):
        """Set the Monte-Carlo decision temperature (<= 0 means hard max)."""
        self.final_decision_softmax_temperature = temperature
class RandomSimulator(Simulator):
    """Baseline simulator that picks uniformly at random among the legal moves."""

    def __init__(self):
        super().__init__()

    def ChooseMoveCoordinatesQuick(self, authority, position, player):
        candidate_moves = authority.LegalMoveCoordinates(position, player)
        return random.choice(candidate_moves)
|
{"/arenas/connect4.py": ["/rules/connect4.py", "/simulation/simulator.py", "/architectures/connect4.py"], "/rules/connect4.py": ["/rules/authority.py"], "/architectures/connect4.py": ["/simulation/simulator.py"], "/training/learn_tictactoe.py": ["/rules/tictactoe.py", "/simulation/simulator.py"], "/rules/tictactoe.py": ["/rules/authority.py"]}
|
19,199,989
|
sebastiengilbert73/gameDeepRL
|
refs/heads/master
|
/rules/tictactoe.py
|
import rules.authority
import numpy as np
import ast
class Authority(rules.authority.GameAuthority):
def __init__(self):
super().__init__()
self.playersList = ['X', 'O']
self.positionArrayShape = (2, 1, 3, 3)
self.moveArrayShape = (1, 1, 3, 3)
self.playerToPlaneIndexDic = {'X': 0, 'O': 1}
def Move(self, currentPositionArr, player, moveArr):
if moveArr.shape != self.moveArrayShape:
raise ValueError("Authority.Move(): moveArr.shape ({}) is not (1, 1, 3, 3)".format(moveArr.shape))
dropCoordinates = self.DropCoordinates(moveArr)
return self.MoveWithCoordinates(currentPositionArr, player, dropCoordinates)
def Winner(self, positionArr, lastPlayerWhoPlayed):
Xwins = self.ThereIs3InARow(self.playerToPlaneIndexDic['X'], positionArr)
Owins = self.ThereIs3InARow(self.playerToPlaneIndexDic['O'], positionArr)
if Xwins:
return 'X'
if Owins:
return 'O'
else:
if np.count_nonzero(positionArr) == 9: # All squares are occupied
return 'draw'
else:
return None
def LegalMovesMask(self, positionArr, player):
if positionArr.shape != self.positionArrayShape:
raise ValueError("Authority.LegalMovesMask(): The shape of positionArr ({}) is not {}".format(
positionArr.shape, self.positionArrayShape))
legalMovesMask = np.ones(self.moveArrayShape, dtype=np.uint8) # Initialized with ones, i.e legal moves
for row in range(3):
for column in range(3):
if positionArr[0, 0, row, column] != 0 or positionArr[1, 0, row, column] != 0:
legalMovesMask[0, 0, row, column] = 0
return legalMovesMask
def LegalMoveCoordinates(self, positionArr, player):
coordinatesList = []
for row in range(3):
for column in range(3):
if positionArr[0, 0, row, column] == 0 and positionArr[1, 0, row, column] == 0:
coordinatesList.append([0, 0, row, column])
return coordinatesList
def PositionArrayShape(self):
return self.positionArrayShape
def MoveArrayShape(self):
return self.moveArrayShape
def InitialPosition(self):
initialPosition = np.zeros(self.positionArrayShape, dtype=np.uint8)
return initialPosition
def SwapPositions(self, positionArr):
swappedPosition = np.copy(positionArr)
playerXPlaneNdx = self.playerToPlaneIndexDic['X']
playerOPlaneNdx = self.playerToPlaneIndexDic['O']
swappedPosition[playerXPlaneNdx] = positionArr[playerOPlaneNdx]
swappedPosition[playerOPlaneNdx] = positionArr[playerXPlaneNdx]
return swappedPosition
def PlayersList(self):
return self.playersList
def MoveWithString(self, currentPositionArr, player, dropCoordinatesAsString):
dropCoordinatesTuple = ast.literal_eval(dropCoordinatesAsString)
return self.MoveWithCoordinates(currentPositionArr, player, dropCoordinatesTuple)
def Display(self, positionArr):
"""if positionArr.shape != self.positionArrayShape: # (C, D, H, W)
raise ValueError("Authority.Display(): The shape of positionArr ({}) is not (2, 1, 3, 3)".format(
positionArr.shape))
"""
for row in range(3):
for column in range(3):
# occupancy = None
if positionArr[self.playerToPlaneIndexDic['X'], 0, row, column] == 1:
print(' X ', end='', flush=True)
elif positionArr[self.playerToPlaneIndexDic['O'], 0, row, column] == 1:
print(' O ', end='', flush=True)
else:
print(' ', end='', flush=True)
if column != 2:
print('|', end='', flush=True)
else:
print('') # new line
if row != 2:
print('--- --- ---')
print("***********")
def RaiseAnErrorIfNoLegalMove(self):
return True
def ThereIs3InARow(self, planeNdx, positionArr):
if positionArr.shape != self.positionArrayShape: # (C, D, H, W)
raise ValueError("Authority.ThereIs3InARow(): The shape of positionArr ({}) is not (2, 1, 3, 3)".format(positionArr.shape))
# Horizontal lines
for row in range(3):
theRowIsFull = True
for column in range(3):
if positionArr[planeNdx, 0, row, column] != 1:
theRowIsFull = False
break
if theRowIsFull:
return True
# Vertical lines
for column in range(3):
theColumnIsFull = True
for row in range(3):
if positionArr[planeNdx, 0, row, column] != 1:
theColumnIsFull = False
break
if theColumnIsFull:
return True
# Diagonal \
diagonalBackslashIsFull = True
for index in range(3):
if positionArr[planeNdx, 0, index, index] != 1:
diagonalBackslashIsFull = False
break
if diagonalBackslashIsFull:
return True
# Diagonal /
diagonalSlashIsFull = True
for index in range(3):
if positionArr[planeNdx, 0, index, 2 - index] != 1:
diagonalSlashIsFull = False
break
if diagonalSlashIsFull:
return True
# Otherwise
return False
def MoveWithCoordinates(self, currentPositionArr, player, dropCoordinates):
if currentPositionArr.shape != self.positionArrayShape: # (C, D, H, W)
raise ValueError("Authority.MoveWithCoordinates(): The shape of currentPositionArr ({}) is not (2, 1, 3, 3)".format(currentPositionArr.shape))
if player != 'X' and player != 'O':
raise ValueError("Authority.MoveWithCoordinates(): The player must be 'X' or 'O', not '{}'".format(player))
if len(dropCoordinates) != 2:
raise ValueError("Authority.MoveWithCoordinates(): dropCoordinates ({}) is not a 2-tuple".format(dropCoordinates))
if dropCoordinates[0] < 0 or dropCoordinates[0] > 2 or dropCoordinates[1] < 0 or dropCoordinates[1] > 2:
raise ValueError("Authority.MoveWithCoordinates(): dropCoordinates entries ({}) are not in the range [0, 2]".format(dropCoordinates))
if currentPositionArr[0, 0, dropCoordinates[0], dropCoordinates[1]] != 0 or \
currentPositionArr[1, 0, dropCoordinates[0], dropCoordinates[1]] != 0:
raise ValueError("Authority.MoveWithCoordinates(): Attempt to drop in an occupied square ({})".format(dropCoordinates))
newPositionArr = currentPositionArr.copy()
newPositionArr[self.playerToPlaneIndexDic[player], 0, dropCoordinates[0], dropCoordinates[1]] = 1
winner = self.Winner(newPositionArr, player)
return newPositionArr, winner
def DropCoordinates(self, moveArr):
    """Return the (row, column) of the single 1 in moveArr's first plane.

    Raises ValueError unless exactly one cell of moveArr[0, 0] equals 1.
    """
    hits = [(row, column)
            for row in range(3)
            for column in range(3)
            if moveArr[0, 0, row, column] == 1]
    if len(hits) != 1:
        raise ValueError("Authority.DropCoordinates(): The number of ones in moveArr ({}) is not one".format(len(hits)))
    return hits[0]
def MoveWithMoveArrayCoordinates(self, current_position, player, move_array_coordinates):
    """Apply *player*'s move described by 4D move-array coordinates.

    move_array_coordinates is a 4-tuple; only indices [2] (row) and [3]
    (column) locate the square — the piece is always placed on the plane
    belonging to *player*. Returns (new_position_arr, winner).
    """
    if move_array_coordinates is None:
        errorMsg = "Authority.MoveWithMoveArrayCoordinates(): move_array_coordinates is None. current_position = {}; player = {}".format(current_position, player)
        raise ValueError(errorMsg)
    row = move_array_coordinates[2]
    column = move_array_coordinates[3]
    # Fix (consistency with MoveWithCoordinates): reject a square occupied in
    # EITHER plane. The original only checked the plane index carried inside
    # move_array_coordinates, so a drop onto the opponent's piece slipped
    # through the validation.
    if current_position[0, 0, row, column] != 0 or \
            current_position[1, 0, row, column] != 0:
        raise ValueError("Authority.MoveWithMoveArrayCoordinates(): Attempt to drop in an occupied square ({})".format(move_array_coordinates))
    new_position_arr = current_position.copy()
    new_position_arr[
        self.playerToPlaneIndexDic[player],
        0,
        row,
        column] = 1
    winner = self.Winner(new_position_arr, player)
    return new_position_arr, winner
|
{"/arenas/connect4.py": ["/rules/connect4.py", "/simulation/simulator.py", "/architectures/connect4.py"], "/rules/connect4.py": ["/rules/authority.py"], "/architectures/connect4.py": ["/simulation/simulator.py"], "/training/learn_tictactoe.py": ["/rules/tictactoe.py", "/simulation/simulator.py"], "/rules/tictactoe.py": ["/rules/authority.py"]}
|
19,207,277
|
lyingCS/font
|
refs/heads/master
|
/test.py
|
import os
from fontforge import *
# font = open(os.sys.argv[1])

def _dump_glyph_attributes(font, target_glyphname="A"):
    """Print a fixed set of fontforge glyph attributes for every
    worth-outputting glyph named *target_glyphname* (side effects only).

    Fix: the original script contained two byte-identical 30-line loops;
    this helper is the single copy, called once per font below.
    """
    for glyph in font:
        if not font[glyph].isWorthOutputting():
            continue
        # font[glyph].export("./fz_bmp/"+str(font[glyph].unicode)+"_"+font[glyph].glyphname + ".bmp")
        if font[glyph].glyphname == target_glyphname:
            g = font[glyph]
            print(g.unicode)
            print(g.foreground)
            print(g.activeLayer)
            print(g.anchorPoints)
            print(g.background)
            print(g.changed)
            print(g.color)
            print(g.comment)
            print(g.dhints)
            print(g.hhints)
            print(g.horizontalComponents)
            print(g.horizontalComponentItalicCorrection)
            print(g.horizontalVariants)
            print(g.layer_cnt)
            print(g.lcarets)
            print("left", g.left_side_bearing)
            print(g.mathKern.bottomLeft)
            print(g.originalgid)
            print(g.persistent)
            print(g.references)
            print(g.right_side_bearing)
            print(g.script)
            print(g.texheight)
            print(g.texdepth)
            print(g.width)
            print(g.vwidth)

# NOTE: 'open' here is fontforge's open() (star-imported above), not the builtin.
font = open('./fz.ttf')
_dump_glyph_attributes(font)
font = open('./output.ttf')
_dump_glyph_attributes(font)
|
{"/fontProcessing.py": ["/imgFuntions.py"]}
|
19,237,490
|
andrianl/dndplay
|
refs/heads/master
|
/Main/classes.py
|
from collections import defaultdict
import Main.armor as armor
import Main.stats
import Main.stats as stat
import Main.weapon as weapons
import constants.dices as dice
class CharClass:
    """
    A generic Character Class (not to be confused with builtin class)
    """
    # Display name of the class; subclasses override.
    name = "Default"
    # Character level; starts at 1.
    level = 1
    # Die rolled for hit points gained per level.
    hit_die = dice.D10
    # NOTE(review): several attributes below hold a *type* (Main.stats.Ability,
    # int) as a placeholder rather than a concrete value; subclasses appear to
    # be expected to override them — confirm against the subclasses.
    primary_abilities = Main.stats.Ability
    # Weapon / armor sets the class can use; empty by default.
    weapon_proficiencies = ()
    armor_proficiencies = ()
    proficiencies_text = ''
    saving_throw_proficiencies = (Main.stats.Ability, Main.stats.Ability)
    languages = ()
    # How many skills the player may pick from choose_skills.
    num_skill_choices = 2
    # NOTE(review): mutable class attributes (list, defaultdict) are shared by
    # all instances and subclasses unless overridden — confirm this is intended.
    choose_skills = []
    skills_proficiency = ()
    start_equipment = ()
    # Spellcasting defaults: non-caster unless a subclass enables it.
    spell_casting = False
    spellcasting_ability = Main.stats.Ability
    spell_slots_by_level = int
    spells_known = ()
    spells_prepared = ()
    # Subclass (archetype) support; off by default.
    subclass = False
    subclasses_available = ()
    features_by_level = defaultdict(list)
    # Two adjacent empty triple-quoted literals concatenate to "".
    description = """""" """"""
class Barbarian(CharClass):
    """Barbarian: d12 hit die, Strength-primary, light/medium armor,
    Str/Con saving throws, two skills from the list below."""
    name = 'Barbarian'
    hit_die = dice.D12
    primary_abilities = stat.Strength
    # Fix: barbarians are proficient with simple AND martial weapons, i.e. the
    # union of both sets. The original used '&' (intersection), which for the
    # disjoint simple/martial weapon sets yields an empty proficiency set.
    weapon_proficiencies = weapons.martial_weapons | weapons.simple_weapons
    armor_proficiencies = (armor.LightArmor, armor.MediumArmor)
    saving_throw_proficiencies = (stat.Strength, stat.Constitution)
    choose_skills = [stat.animal_handling, stat.athletics, stat.intimidation,
                     stat.nature, stat.perception, stat.survival]
|
{"/Main/stats.py": ["/Main/armor.py", "/Main/dice.py"], "/Main/weapon.py": ["/Main/stats.py", "/Main/wage.py"], "/Main/race.py": ["/Main/weapon.py"], "/Main/character.py": ["/Main/armor.py", "/Main/backgrounds.py", "/Main/classes.py", "/Main/dice.py", "/Main/race.py", "/Main/stats.py", "/Main/weapon.py"], "/Main/armor.py": ["/Main/wage.py"], "/UI/main.py": ["/Main/character.py", "/Main/dice.py", "/Main/weapon.py", "/UI/character_sheet.py"], "/Main/backgrounds.py": ["/Main/stats.py"], "/Main/classes.py": ["/Main/armor.py", "/Main/stats.py", "/Main/weapon.py"]}
|
19,330,961
|
grischa/mytardis
|
refs/heads/master
|
/tardis/default_settings.py
|
# pylint: disable=wildcard-import
from datetime import timedelta # noqa # pylint: disable=W0614
from os import path
from tempfile import gettempdir
import djcelery
# MUST change this to False for any serious use.
DEBUG = True
ADMINS = (('bob', 'bob@bobmail.com'), )
MANAGERS = ADMINS
# Dictionary containing the settings for all databases to be used.
# The DATABASES setting must configure a default database;
# any number of additional databases may also be specified.
DATABASES = {
'default': {
# 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'ENGINE': 'django.db.backends.sqlite3',
# Name of the database to use. For SQLite, it's the full path.
'NAME': 'db.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Fix 'SQLite backend does not support timezone-aware datetimes
# when USE_TZ is False.' error by setting USE_TZ to True
USE_TZ = True
# Celery queue
BROKER_URL = 'django://'
'''
use django:, add kombu.transport.django to INSTALLED_APPS
or use redis: install redis separately and add the following to a
custom buildout.cfg:
django-celery-with-redis
redis
hiredis
'''
# BROKER_URL = 'redis://localhost:6379/0'
# CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
# A dictionary containing the settings for all caches to be used with
# Django. The CACHES setting must configure a default cache; any
# number of additional caches may also be specified. Once the cache
# is set up, you'll need to add
# 'django.middleware.cache.UpdateCacheMiddleware' and
# 'django.middleware.cache.FetchFromCacheMiddleware'
# to your MIDDLEWARE_CLASSES setting below
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'default_cache',
},
# # or use memcached
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': '127.0.0.1:11211',
# },
'celery-locks': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'celery_lock_cache',
}
}
'''
change the CACHES setting to memcached if you prefer. Requires additional
dependencies.
'''
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Australia/Melbourne'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Date format to use by default. ("jS F Y" => "8th March 2012")
# https://docs.djangoproject.com/en/1.3/ref/templates/builtins/#std:templatefilter-date # noqa
DATE_FORMAT = "jS F Y"
DATETIME_FORMAT = "jS F Y H:i"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# SECRET_KEY has been removed. Generate one by referring to build.sh
ALLOWED_HOSTS = ['*']
'''
For security reasons this needs to be set to your hostname and/or IP
address in production.
'''
SITE_TITLE = 'MyTardis'
'''
customise the title of your site
'''
SPONSORED_TEXT = None
'''
add text to the footer to acknowledge someone
'''
MIDDLEWARE_CLASSES = (
# 'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'tardis.tardis_portal.logging_middleware.LoggingMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'tardis.tardis_portal.auth.token_auth.TokenAuthMiddleware',
# 'django.middleware.cache.FetchFromCacheMiddleware',
)
ROOT_URLCONF = 'tardis.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
path.join(path.dirname(__file__),
'tardis_portal/templates/').replace('\\', '/'),
],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'tardis.tardis_portal.context_processors'
'.global_contexts',
'tardis.tardis_portal.context_processors'
'.single_search_processor',
'tardis.tardis_portal.context_processors'
'.tokenuser_processor',
'tardis.tardis_portal.context_processors'
'.registration_processor',
'tardis.tardis_portal.context_processors'
'.user_details_processor',
'tardis.tardis_portal.context_processors'
'.manage_account_processor',
'tardis.tardis_portal.context_processors'
'.google_analytics',
],
'loaders': [
'django.template.loaders.app_directories.Loader',
'django.template.loaders.filesystem.Loader',
],
},
}
]
STATIC_DOC_ROOT = path.join(path.dirname(__file__),
'tardis_portal/site_media').replace('\\', '/')
def get_admin_media_path():
    """Return the filesystem path of django.contrib.admin's static assets."""
    import pkgutil
    admin_loader = pkgutil.get_loader("django.contrib.admin")
    # loader.filename points at the admin package's directory on disk.
    return path.join(admin_loader.filename, 'static', 'admin')

ADMIN_MEDIA_STATIC_DOC_ROOT = get_admin_media_path()
# FILE_STORE_PATH = path.abspath(path.join(path.dirname(__file__),
# '../var/store/')).replace('\\', '/')
STAGING_PATH = path.abspath(path.join(path.dirname(__file__),
'../var/staging/')).replace('\\', '/')
# SYNC_TEMP_PATH = path.abspath(path.join(path.dirname(__file__),
# '../var/sync/')).replace('\\', '/')
DEFAULT_STORAGE_BASE_DIR = path.abspath(path.join(path.dirname(__file__),
'../var/store/')).replace('\\', '/')
# LEGACY, ignore
FILE_STORE_PATH = DEFAULT_STORAGE_BASE_DIR
INITIAL_LOCATIONS = {}
METADATA_STORE_PATH = DEFAULT_STORAGE_BASE_DIR
'''
storage path for image paths stored in parameters. Better to set to another
location if possible
'''
STAGING_PROTOCOL = 'ldap'
STAGING_MOUNT_PREFIX = 'smb://localhost/staging/'
STAGING_MOUNT_USER_SUFFIX_ENABLE = False
REQUIRE_DATAFILE_CHECKSUMS = True
REQUIRE_DATAFILE_SIZES = True
REQUIRE_VALIDATION_ON_INGESTION = True
DEFAULT_FILE_STORAGE = \
'tardis.tardis_portal.storage.MyTardisLocalFileSystemStorage'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = DEFAULT_STORAGE_BASE_DIR
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = None
# Static content location
STATIC_URL = '/static/'
# Used by "django collectstatic"
STATIC_ROOT = path.abspath(path.join(path.dirname(__file__), '..', 'static'))
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
# ADMIN_MEDIA_PREFIX = STATIC_URL + '/admin/'
STATICFILES_DIRS = (
('admin', ADMIN_MEDIA_STATIC_DOC_ROOT),
)
# Use cachable copies of static files
STATICFILES_STORAGE = \
'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
# A tuple of strings designating all applications that are enabled in
# this Django installation.
TARDIS_APP_ROOT = 'tardis.apps'
INSTALLED_APPS = (
'django_extensions',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.humanize',
'registration',
'django_jasmine',
'djcelery',
'kombu.transport.django',
'bootstrapform',
'mustachejs',
'tastypie',
'tastypie_swagger',
'tardis.tardis_portal',
'tardis.tardis_portal.templatetags',
'tardis.search',
'tardis.analytics',
# these optional apps, may require extra settings
'tardis.apps.publication_forms',
'tardis.apps.oaipmh',
# 'tardis.apps.push_to',
)
INDEX_VIEWS = {}
'''
A custom index page override is defined in as dictionary mapping a class-based
view (or view function) to a Django ``Site``, specified by SITE_ID (an integer)
or the domain name of the incoming request.
See: https://mytardis.readthedocs.org/en/develop/contextual_views.html#custom-index-view
eg:
::
INDEX_VIEWS = {
1: 'tardis.apps.my_custom_app.views.MyCustomIndexSubclass',
'store.example.com': 'tardis.apps.myapp.AnotherCustomIndexSubclass'
}
'''
DATASET_VIEWS = []
'''
Dataset view overrides ('contextual views') are specified as tuples mapping
a Schema namespace to a class-based view (or view function).
See: https://mytardis.readthedocs.org/en/develop/contextual_views.html#dataset-and-experiment-views
eg:
::
DATASET_VIEWS = [
('http://example.org/schemas/dataset/my_awesome_schema',
'tardis.apps.my_awesome_app.views.CustomDatasetViewSubclass'),
]
'''
EXPERIMENT_VIEWS = []
'''
Experiment view overrides ('contextual views') are specified as tuples mapping
a Schema namespace to a class-based view (or view function).
See: https://mytardis.readthedocs.org/en/develop/contextual_views.html#dataset-and-experiment-views
eg:
::
EXPERIMENT_VIEWS = [
('http://example.org/schemas/expt/my_awesome_schema',
'tardis.apps.my_awesome_app.views.CustomExptViewSubclass'),
]
'''
JASMINE_TEST_DIRECTORY = path.abspath(path.join(path.dirname(__file__),
'tardis_portal',
'tests',
'jasmine'))
USER_PROVIDERS = (
'tardis.tardis_portal.auth.localdb_auth.DjangoUserProvider',
)
GROUP_PROVIDERS = (
'tardis.tardis_portal.auth.localdb_auth.DjangoGroupProvider',
'tardis.tardis_portal.auth.token_auth.TokenGroupProvider',
)
# AUTH_PROVIDERS entry format:
# ('name', 'display name', 'backend implementation')
# name - used as the key for the entry
# display name - used as the displayed value in the login form
# backend implementation points to the actual backend implementation
#
# In most cases, the backend implementation should be a fully
# qualified class name string, whose class can be instantiated without
# any arguments. For LDAP authentication, the
# 'tardis.tardis_portal.auth.ldap_auth.LDAPBackend'
# class can't be instantiated without any arguments, so the
# 'tardis.tardis_portal.auth.ldap_auth.ldap_auth'
# wrapper function should be used instead.
#
# We will assume that localdb will always be a default AUTH_PROVIDERS entry
AUTH_PROVIDERS = (
('localdb', 'Local DB',
'tardis.tardis_portal.auth.localdb_auth.DjangoAuthBackend'),
)
SFTP_USERNAME_ATTRIBUTE = 'email'
'''
The attribute from the User model ('email' or 'username') used to generate
the SFTP login example on the sftp_access help page.
'''
# default authentication module for experiment ownership user during
# ingestion? Must be one of the above authentication provider names
DEFAULT_AUTH = 'localdb'
AUTH_PROFILE_MODULE = 'tardis_portal.UserProfile'
# New users are added to these groups by default.
NEW_USER_INITIAL_GROUPS = []
ACCOUNT_ACTIVATION_DAYS = 3
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'tardis.tardis_portal.auth.authorisation.ACLAwareBackend',
)
# Email Configuration
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'bob@bobmail.com'
EMAIL_HOST_PASSWORD = 'bob'
EMAIL_USE_TLS = True
# Post Save Filters
# POST_SAVE_FILTERS = [
# ("tardis.tardis_portal.filters.exif.make_filter",
# ["EXIF", "http://exif.schema"]), # this filter requires pyexiv2
# # http://tilloy.net/dev/pyexiv2/
# ]
# Post Save Filters
# POST_SAVE_FILTERS = [
# ("tardis.tardis_portal.filters.diffractionimage.make_filter",
# ["DIFFRACTION", "http://www.tardis.edu.au/schemas/trdDatafile/1",
# "/Users/steve/Desktop/diffdump"]), # requires ccp4 diffdump binary
# ]
# logging levels are: DEBUG, INFO, WARN, ERROR, CRITICAL
SYSTEM_LOG_LEVEL = 'INFO'
MODULE_LOG_LEVEL = 'INFO'
SYSTEM_LOG_FILENAME = 'request.log'
MODULE_LOG_FILENAME = 'tardis.log'
# Rollover occurs whenever the current log file is nearly maxBytes in length;
# if maxBytes is zero, rollover never occurs
SYSTEM_LOG_MAXBYTES = 0
MODULE_LOG_MAXBYTES = 0
# Uploadify root folder path, relative to STATIC root
UPLOADIFY_PATH = '%s/%s' % (STATIC_URL, 'js/lib/uploadify')
# Upload path that files are sent to
UPLOADIFY_UPLOAD_PATH = '%s/%s' % (MEDIA_URL, 'uploads')
# Download size limit: zero means no limit
DOWNLOAD_ARCHIVE_SIZE_LIMIT = 0
# Render image file size limit: zero means no limit
RENDER_IMAGE_SIZE_LIMIT = 0
# Max number of images in dataset view's carousel: zero means no limit
MAX_IMAGES_IN_CAROUSEL = 100
# temporary download file location
DOWNLOAD_TEMP_DIR = gettempdir()
# Safety margin for temporary space when downloading. (Estimated archive
# file size + safety_margin must be less than the available disk space ...)
DOWNLOAD_SPACE_SAFETY_MARGIN = 8388608
# Turn on/off the self-registration link and form
REGISTRATION_OPEN = True
# or disable registration app (copy to your settings.py first!)
# INSTALLED_APPS = filter(lambda x: x != 'registration', INSTALLED_APPS)
# Settings for the single search box
SINGLE_SEARCH_ENABLED = False
# flip this to turn on search:
if SINGLE_SEARCH_ENABLED:
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.'
'ElasticsearchSearchEngine',
'URL': 'http://127.0.0.1:9200/',
'INDEX_NAME': 'haystack',
},
}
else:
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
if SINGLE_SEARCH_ENABLED:
INSTALLED_APPS = INSTALLED_APPS + ('haystack',)
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
DEFAULT_INSTITUTION = "Monash University"
TOKEN_EXPIRY_DAYS = 30
TOKEN_LENGTH = 30
TOKEN_USERNAME = 'tokenuser'
REQUIRE_VALID_PUBLIC_CONTACTS = True
# RIF-CS Settings
OAI_DOCS_PATH = path.abspath(path.join(path.dirname(__file__), '../var/oai'))
RIFCS_PROVIDERS = (
'tardis.tardis_portal.publish.provider.rifcsprovider.RifCsProvider',)
RIFCS_TEMPLATE_DIR = path.join(
path.dirname(__file__),
'tardis_portal/templates/tardis_portal/rif-cs/profiles/')
RIFCS_GROUP = "MyTARDIS Default Group"
RIFCS_KEY = "keydomain.example"
RELATED_INFO_SCHEMA_NAMESPACE = \
'http://www.tardis.edu.au/schemas/related_info/2011/11/10'
RELATED_OTHER_INFO_SCHEMA_NAMESPACE = \
'http://www.tardis.edu.au/schemas/experiment/annotation/2011/07/07'
DOI_ENABLE = False
DOI_XML_PROVIDER = 'tardis.tardis_portal.ands_doi.DOIXMLProvider'
# DOI_TEMPLATE_DIR = path.join(
# TARDIS_DIR, 'tardis_portal/templates/tardis_portal/doi/')
DOI_TEMPLATE_DIR = path.join('tardis_portal/doi/')
DOI_APP_ID = ''
DOI_NAMESPACE = 'http://www.tardis.edu.au/schemas/doi/2011/12/07'
DOI_MINT_URL = 'https://services.ands.org.au/home/dois/doi_mint.php'
DOI_RELATED_INFO_ENABLE = False
DOI_BASE_URL = 'http://mytardis.example.com'
OAIPMH_PROVIDERS = [
'tardis.apps.oaipmh.provider.experiment.DcExperimentProvider',
'tardis.apps.oaipmh.provider.experiment.RifCsExperimentProvider',
]
REDIS_VERIFY_MANAGER = False
'''
Uses REDIS to keep track of files that fail to verify
'''
REDIS_VERIFY_MANAGER_SETUP = {
'host': 'localhost',
'port': 6379,
'db': 1,
}
REDIS_VERIFY_DELAY = 86400 # 1 day = 86400
'''
delay between verification attempts in seconds
'''
CELERYBEAT_SCHEDULE = {
"verify-files": {
"task": "tardis_portal.verify_dfos",
"schedule": timedelta(seconds=300)
},
# enable this task for the publication workflow
# "update-publication-records": {
# "task": "apps.publication_forms.update_publication_records",
# "schedule": timedelta(seconds=300)
# },
}
djcelery.setup_loader()
# DEFAULT_LOCATION = "local"
# INITIAL_LOCATIONS = [{'name': DEFAULT_LOCATION,
# 'url': 'file://' + FILE_STORE_PATH,
# 'provider': 'local',
# 'type': 'online',
# 'priority': 10},
# # {'name': 'sync',
# # 'url': 'file://' + SYNC_PATH,
# # 'provider': 'local',
# # 'type': 'external',
# # 'priority': 8},
# {'name': 'staging',
# 'provider': 'local',
# 'url': 'file://' + STAGING_PATH,
# 'type': 'external',
# 'priority': 5}]
DEFAULT_MIGRATION_DESTINATION = 'unknown'
TRANSFER_PROVIDERS = {
'http': 'tardis.tardis_portal.transfer.SimpleHttpTransfer',
'dav': 'tardis.tardis_portal.transfer.WebDAVTransfer',
'local': 'tardis.tardis_portal.transfer.LocalTransfer'}
UPLOAD_METHOD = False
'''
Old version: UPLOAD_METHOD = "uploadify".
This can be changed to an app that provides an upload_button function,
eg. "tardis.apps.filepicker.views.upload_button" to use a fancy
commercial uploader.
To use filepicker, please also get an API key at http://filepicker.io
'''
# FILEPICKER_API_KEY = "YOUR KEY"
ARCHIVE_FILE_MAPPERS = {
'deep-storage': (
'tardis.apps.deep_storage_download_mapper.mapper.deep_storage_mapper',
),
}
# Site's default archive organization (i.e. path structure)
DEFAULT_ARCHIVE_ORGANIZATION = 'deep-storage'
DEFAULT_ARCHIVE_FORMATS = ['tar']
'''
Site's preferred archive types, with the most preferred first
other available option: 'tgz'. Add to list if desired
'''
# DEEP_DATASET_STORAGE = True
# '''
# Set to true if you want to preserve folder structure on "stage_file" ingest,
# eg. via the METS importer.
# Currently, only tested for the METS importer.
# '''
# Get version from git to be displayed on About page.
def get_git_version():
    """Collect commit/date/branch/tag info from git for the About page.

    Returns a dict of strings on success. NOTE(review): on any failure it
    returns the *list* ["unavailable"] — an inconsistent return type that
    consumers must handle; kept as-is to preserve behavior.
    """
    repo_dir = path.dirname(path.dirname(path.abspath(__file__)))

    def run_git(args):
        import subprocess
        # shell=True with a fixed, non-user-supplied command string.
        proc = subprocess.Popen('git %s' % args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=True,
                                cwd=repo_dir,
                                universal_newlines=True)
        return proc.communicate()[0]

    try:
        return {
            'commit_id': run_git("log -1 --format='%H'").strip(),
            'date': run_git("log -1 --format='%cd' --date=rfc").strip(),
            'branch': run_git("rev-parse --abbrev-ref HEAD").strip(),
            'tag': run_git("describe --abbrev=0 --tags").strip(),
        }
    except Exception:
        return ["unavailable"]

MYTARDIS_VERSION = get_git_version()
# If you want enable user agent sensing, copy this to settings.py
# and uncomment it.
#
# USER_AGENT_SENSING = True
# if USER_AGENT_SENSING:
# from os import environ
# # Workaround for bug in ua_parser ... can't find its builtin copy
# # of regexes.yaml ... in versions 0.3.2 and earlier. Remove when fixed.
# environ['UA_PARSER_YAML'] = '/opt/mytardis/current/ua_parser_regexes.yaml'
#
# INSTALLED_APPS = INSTALLED_APPS + ('django_user_agents',)
# MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + \
# ('django_user_agents.middleware.UserAgentMiddleware',)
AUTOGENERATE_API_KEY = False
'''
Generate a tastypie API key with user post_save
(tardis/tardis_portal/models/hooks.py)
'''
BLEACH_ALLOWED_TAGS = [
'a',
'abbr',
'acronym',
'b',
'blockquote',
'code',
'em',
'i',
'li',
'ol',
'strong',
'ul',
]
'''
These are the default bleach values and shown here as an example.
'''
BLEACH_ALLOWED_ATTRIBUTES = {
'a': ['href', 'title'],
'abbr': ['title'],
'acronym': ['title'],
}
'''
These are the default bleach values and shown here as an example.
'''
SFTP_PORT = 2200
SFTP_GEVENT = False
SFTP_HOST_KEY = (
"-----BEGIN RSA PRIVATE KEY-----\n"
"MIICXgIBAAKCAIEAl7sAF0x2O/HwLhG68b1uG8KHSOTqe3Cdlj5i/1RhO7E2BJ4B\n"
"3jhKYDYtupRnMFbpu7fb21A24w3Y3W5gXzywBxR6dP2HgiSDVecoDg2uSYPjnlDk\n"
"HrRuviSBG3XpJ/awn1DObxRIvJP4/sCqcMY8Ro/3qfmid5WmMpdCZ3EBeC0CAwEA\n"
"AQKCAIBSGefUs5UOnr190C49/GiGMN6PPP78SFWdJKjgzEHI0P0PxofwPLlSEj7w\n"
"RLkJWR4kazpWE7N/bNC6EK2pGueMN9Ag2GxdIRC5r1y8pdYbAkuFFwq9Tqa6j5B0\n"
"GkkwEhrcFNBGx8UfzHESXe/uE16F+e8l6xBMcXLMJVo9Xjui6QJBAL9MsJEx93iO\n"
"zwjoRpSNzWyZFhiHbcGJ0NahWzc3wASRU6L9M3JZ1VkabRuWwKNuEzEHNK8cLbRl\n"
"TyH0mceWXcsCQQDLDEuWcOeoDteEpNhVJFkXJJfwZ4Rlxu42MDsQQ/paJCjt2ONU\n"
"WBn/P6iYDTvxrt/8+CtLfYc+QQkrTnKn3cLnAkEAk3ixXR0h46Rj4j/9uSOfyyow\n"
"qHQunlZ50hvNz8GAm4TU7v82m96449nFZtFObC69SLx/VsboTPsUh96idgRrBQJA\n"
"QBfGeFt1VGAy+YTLYLzTfnGnoFQcv7+2i9ZXnn/Gs9N8M+/lekdBFYgzoKN0y4pG\n"
"2+Q+Tlr2aNlAmrHtkT13+wJAJVgZATPI5X3UO0Wdf24f/w9+OY+QxKGl86tTQXzE\n"
"4bwvYtUGufMIHiNeWP66i6fYCucXCMYtx6Xgu2hpdZZpFw==\n"
"-----END RSA PRIVATE KEY-----\n")
'''
public, useless key, debugging use only
'''
# Show the Rapid Connect login button.
RAPID_CONNECT_ENABLED = False
RAPID_CONNECT_CONFIG = {}
RAPID_CONNECT_CONFIG['secret'] = 'CHANGE_ME'
RAPID_CONNECT_CONFIG['authnrequest_url'] = ''
'''something like
'https://rapid.test.aaf.edu.au/jwt/authnrequest/research/XXXXXXXXXXXXXXXX'
'''
RAPID_CONNECT_CONFIG['iss'] = 'https://rapid.test.aaf.edu.au'
''' 'https://rapid.test.aaf.edu.au' or 'https://rapid.aaf.edu.au'
'''
RAPID_CONNECT_CONFIG['aud'] = 'https://example.com/rc/'
'''Public facing URL that accepts the HTTP/HTTPS POST request from
Rapid Connect.
'''
MANAGE_ACCOUNT_ENABLED = True
# Example settings for the publication form workflow. Also requires the
# corresponding app in 'INSTALLED_APPS' and the corresponding task to be
# enabled
# Publication form settings #
# PUBLICATION_NOTIFICATION_SENDER_EMAIL = 'emailsender@mytardisserver'
# PUBLICATION_OWNER_GROUP = 'publication-admin'
# PUBLICATION_SCHEMA_ROOT = 'http://www.tardis.edu.au/schemas/publication/'
# This schema holds bibliographic details including authors and
# acknowledgements
# PUBLICATION_DETAILS_SCHEMA = PUBLICATION_SCHEMA_ROOT + 'details/'
# Any experiment with this schema is treated as a draft publication
# This schema will be created automatically if not present
# PUBLICATION_DRAFT_SCHEMA = PUBLICATION_SCHEMA_ROOT + 'draft/'
# Form mappings
# PUBLICATION_FORM_MAPPINGS is a list of dictionaries that contain the
# following parameters:
# dataset_schema: the namespace of the schema that triggers the form to be used
# publication_schema: the namespace of the schema that should be added to the
# publication
# form_template: a URL to the form template (usually static HTML)
# PUBLICATION_FORM_MAPPINGS = [
# {'dataset_schema': 'http://example.com/a_dataset_schema',
# 'publication_schema': 'http://example.com/a_publication_schema',
# 'form_template': '/static/publication-form/form-template.html'}]
# Note: dataset_schema is treated as a regular expression
# The PDB publication schema is used for any experiments that reference a
# PDB structure
# It is defined here as a setting because it is used both for the publication
# form and for fetching data from PDB.org and must always match.
# PDB_PUBLICATION_SCHEMA_ROOT = 'http://synchrotron.org.au/pub/mx/pdb/'
# PDB_SEQUENCE_PUBLICATION_SCHEMA = PDB_PUBLICATION_SCHEMA_ROOT+'sequence/'
# PDB_CITATION_PUBLICATION_SCHEMA = PDB_PUBLICATION_SCHEMA_ROOT+'citation/'
# PDB_REFRESH_INTERVAL = timedelta(days=7)
# PUBLICATION_FORM_MAPPINGS = [
# {'dataset_schema': r'^http://synchrotron.org.au/mx/',
# 'publication_schema': PDB_PUBLICATION_SCHEMA_ROOT,
# 'form_template': '/static/publication-form/mx-pdb-template.html'},
# {'dataset_schema': r'^http://synchrotron.org.au/mx/',
# 'publication_schema': 'http://synchrotron.org.au/pub/mx/dataset/',
# 'form_template':
# '/static/publication-form/mx-dataset-description-template.html'}]
# Put your API_ID for the Monash DOI minting service here. For other DOI
# minting, please contact the developers
# MODC_DOI_API_ID = ''
# MODC_DOI_API_PASSWORD = ''
# MODC_DOI_MINT_DEFINITION = 'https://doiserver/modc/ws/MintDoiService.wsdl'
# MODC_DOI_ACTIVATE_DEFINITION = 'https://doiserver/modc/ws/' \
# 'ActivateDoiService.wsdl'
# MODC_DOI_DEACTIVATE_DEFINITION = 'https://doiserver/modc/ws/' \
# 'DeactivateDoiService.wsdl'
# MODC_DOI_ENDPOINT = 'https://doiserver/modc/ws/'
# MODC_DOI_MINT_URL_ROOT = 'http://mytardisserver/'
# Push-to app settings
# PUSH_TO_FROM_EMAIL = 'noreply@example.com'
|
{"/tardis/default_settings/static_files.py": ["/tardis/default_settings/storage.py"], "/tardis/urls.py": ["/tardis/tardis_portal/views/pages.py"], "/tardis/test_settings.py": ["/tardis/default_settings.py"]}
|
19,400,973
|
Caden4357/tvShows
|
refs/heads/master
|
/views.py
|
from django.shortcuts import render, HttpResponse,redirect
from .models import *
from django.contrib import messages
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request, "index.html")
def directors(request):
    """List every director."""
    all_directors = Director.objects.all()
    return render(request, 'directors.html', {'directors': all_directors})
def create_director(request):
    """Create a Director from POST data, then return to the directors list.

    Fix: the original bound the new object to an unused local and, for
    non-POST requests, fell off the end returning None (an invalid Django
    response). Every request method now ends with a redirect.
    """
    if request.method == "POST":
        Director.objects.create(name=request.POST['name'])
    return redirect('/directors')
def destroy_director(request, id):
    """Stub: real deletion is not implemented yet."""
    # NOTE(review): the f-string lacks a space before the id
    # ("director{id}") — kept byte-identical to preserve output.
    return HttpResponse(f"Delete director{id}")
def show_director(request, id):
    """Stub: detail page for a single director."""
    return HttpResponse(f"Show one director {id}")
def movies(request):
    """Movie list page; directors are needed for the create-movie dropdown."""
    page_data = {
        'directors': Director.objects.all(),
        'movies': Movie.objects.all(),
    }
    return render(request, 'movies.html', page_data)
def create_movie(request):
    """Validate POST data and create a Movie, flashing errors if invalid.

    Fix: non-POST requests previously fell off the end returning None (an
    invalid Django response); they now redirect to the movie list as well.
    """
    if request.method == "POST":
        errors = Movie.objects.basic_validator(request.POST)
        if errors:
            # Surface each validation message to the template via the
            # messages framework, then bounce back to the form.
            for value in errors.values():
                messages.error(request, value)
            return redirect('/movies')
        director = Director.objects.get(id=request.POST['director'])
        Movie.objects.create(
            title=request.POST['title'],
            description=request.POST['description'],
            director=director,
        )
    return redirect('/movies')
def show_movie(request, id):
    """Stub: detail page for a single movie."""
    return HttpResponse(f"Placeholder for show one movie {id}details")
def edit_movie(request, id):
    """Serve the edit form for movie `id`.

    NOTE(review): the form is served only on POST (the list page posts to
    this URL); plain GETs bounce back to /movies — confirm this is intended.
    """
    if request.method != "POST":
        return redirect('/movies')
    context = {
        'movie': Movie.objects.get(id=id),
        'directors': Director.objects.all(),
    }
    return render(request, 'edit_movie.html', context)
def update_movie(request, id):
    """Apply edit-form changes to movie `id`, then return to the list.

    Fix: the original assigned request.POST['director.id'] — a key the form
    does not send, and a raw string rather than a Director instance — to the
    ForeignKey field. Look the Director up by the posted 'director' id
    instead, matching create_movie. Non-POST requests also redirect now
    instead of returning None.
    """
    if request.method == "POST":
        movie = Movie.objects.get(id=id)
        movie.title = request.POST['title']
        movie.description = request.POST['description']
        movie.director = Director.objects.get(id=request.POST['director'])
        movie.save()
    return redirect('/movies')
def destroy_movie(request, id):
    """Delete movie `id` (POST only), then return to the movie list.

    Fix: non-POST requests previously returned None (an invalid Django
    response); every path now ends with a redirect.
    """
    if request.method == "POST":
        Movie.objects.get(id=id).delete()
    return redirect('/movies')
|
{"/views.py": ["/models.py"]}
|
19,400,974
|
Caden4357/tvShows
|
refs/heads/master
|
/models.py
|
from django.db import models
# Create your models here.
class Director(models.Model):
    """A film director; movies reference it via Movie.director."""
    name = models.CharField(max_length=255)
    # NOTE(review): DateField stores the date only; most audit columns use
    # DateTimeField — confirm date-granularity is intended.
    created_at = models.DateField(auto_now_add=True)  # set once on creation
    updated_at = models.DateField(auto_now=True)      # refreshed on every save()
class MovieManager(models.Manager):
    """Custom manager adding form-style validation for Movie input."""

    def basic_validator(self, postData):
        """Return a dict of field -> error message for invalid postData.

        Expects 'title' and 'description' keys; an empty dict means valid.
        """
        errors = {}
        if len(postData['title']) < 2:
            # Fix: key the error under 'title' — the field actually being
            # validated. The original keyed it 'name', a field Movie lacks;
            # the user-facing message text is left unchanged.
            errors["title"] = "Movie name should be at least 2 characters"
        if len(postData['description']) < 5:
            errors["description"] = "Movie description should be at least 5 characters"
        return errors
class Movie(models.Model):
    """A movie belonging to one Director (reverse accessor: director.movies)."""
    title = models.CharField(max_length=250)
    description=models.CharField(max_length=255)
    # Deleting a Director cascades to its movies.
    director =models.ForeignKey(Director,related_name="movies", on_delete=models.CASCADE)
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)
    # Custom manager supplying basic_validator() for form input checks.
    objects = MovieManager()
class Actor(models.Model):
    # An actor; linked many-to-many to the movies they appear in
    # (reverse accessor: movie.actors).
    name = models.CharField(max_length=255)
    movies = models.ManyToManyField(Movie, related_name='actors')
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)
|
{"/views.py": ["/models.py"]}
|
19,409,911
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/serializers.py
|
# -*- coding: utf-8 -*-
from rest_framework import serializers
from joonggo.models import Article, Alarm
class ArticleSerializer(serializers.ModelSerializer):
    """Read serializer for Article; flattens the related Source to its name."""
    source = serializers.SerializerMethodField(read_only=True)
    def get_source(self, obj):
        # Expose the source's display name instead of a nested object.
        return obj.source.name
    class Meta:
        model = Article
        fields = ('id', 'uid', 'title', 'content', 'price', 'url', 'created', 'source', 'source_id')
class AlarmSerializer(serializers.ModelSerializer):
    """Full-field serializer for Alarm records."""
    class Meta:
        model = Alarm
        fields = '__all__'
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,912
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/__init__.py
|
# -*- coding: utf-8 -*-
# Tell Django which AppConfig class to load for this app.
default_app_config = 'joonggo.apps.JoongoConfig'
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,913
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/views.py
|
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.shortcuts import render, render_to_response
from joonggo.models import Article, Source
def index(request):
    """Landing page: show how many articles were scraped from each source."""
    source_names = {
        'joonggonara': '중고나라',
        'momsholic': '맘스홀릭',
        'cetizen': '세티즌',
    }
    counter = {key: Article.objects.filter(source__name=label).count()
               for key, label in source_names.items()}
    return render(request, 'index.html', {'counter': counter})
def search(request):
    """Render the search page; no server-side context is needed."""
    return render(request, 'search.html', {})
def alarm(request):
    """Render the alarm page; no server-side context is needed."""
    return render(request, 'alarm.html', {})
def sell(request):
    """Show the sell form when a 'token' query parameter is present;
    otherwise send the user to the Naver login page."""
    params = request.GET.copy()
    if 'token' not in params:
        return render(request, 'naverlogin.html', {})
    return render(request, 'sell.html', {'source': Source.objects.all()})
def callback(request):
    """Render the OAuth callback landing page; no context is needed."""
    return render(request, 'callback.html', {})
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,914
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/models.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from dynamic_scraper.models import Scraper, SchedulerRuntime
from scrapy_djangoitem import DjangoItem
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.conf import settings
class Source(models.Model):
    """A scraped marketplace site plus its django-dynamic-scraper wiring."""
    name = models.CharField('이름', max_length=20, default='')
    category = models.CharField('카테고리', max_length=20, default='', blank=True)
    url = models.URLField('URL')
    login_url = models.URLField('Login URL', blank=True, default='')
    # DDS scraper config and its scheduler state; Source survives their deletion.
    scraper = models.ForeignKey(Scraper, blank=True, null=True, on_delete=models.SET_NULL)
    scraper_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
    base_url = models.URLField('PC Base URL', blank=True, default='')
    mobile_base_url = models.URLField('mobile Base URL', blank=True, default='')
    site_image_url = models.CharField('site base image', max_length=256, blank=True, default='')
    def __str__(self):
        return '[{name}] {category}'.format(name=self.name, category=self.category)
    def __unicode__(self):
        return u'[{name}] {category}'.format(name=self.name, category=self.category)
    class Meta:
        verbose_name = u'출처'
        verbose_name_plural = u'출처 목록'
# Used-goods posting content
class Article(models.Model):
    """One scraped for-sale posting from a Source site."""
    source = models.ForeignKey(Source, verbose_name='출처')
    # required fields
    uid = models.CharField('고유번호', max_length=20, default='', unique=True, db_index=True,
                           help_text='고유번호를 말함')
    title = models.CharField('제목', db_index=True, max_length=200, default='')
    content = models.TextField('내용', default='', blank=True)
    price = models.PositiveIntegerField('가격', default=0, blank=True, db_index=True)
    url = models.URLField('링크', default='', blank=True, max_length=512, help_text='해당 글 주소')
    # optional fields
    tags = models.TextField('태그', default='', blank=True, help_text='콤마로 구분')
    survival_count = models.PositiveIntegerField('글 유지', default=1, blank=True, db_index=True)
    is_include_parcel = models.BooleanField('택배포함여부', default=False, blank=True)
    is_sold_out = models.BooleanField('판매여부', default=False, blank=True, db_index=True)
    created = models.DateTimeField('등록일', auto_now_add=True)
    checker_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
    def __str__(self):
        return '[{source}] {title}'.format(source=self.source, title=self.title)
    def __unicode__(self):
        return u'[{source}] {title}'.format(source=self.source, title=self.title)
    class Meta:
        verbose_name = '게시물'
        verbose_name_plural = '게시물 목록'
class ArticleItem(DjangoItem):
    # Scrapy item backed by the Article model (used by the DDS spiders).
    django_model = Article
@receiver(pre_delete)
def pre_delete_handler(sender, instance, using, **kwargs):
    """Clean up orphaned scheduler runtimes when a Source or Article is deleted.

    The SchedulerRuntime rows belong to django-dynamic-scraper and are linked
    with SET_NULL, so they are not cascaded and must be removed by hand.
    """
    if isinstance(instance, Source):
        if instance.scraper_runtime:
            instance.scraper_runtime.delete()
    if isinstance(instance, Article):
        if instance.checker_runtime:
            instance.checker_runtime.delete()
# Fix: removed the explicit `pre_delete.connect(pre_delete_handler)` call; the
# @receiver decorator above already registers the handler, so the extra call
# was redundant (Django deduplicates identical receiver registrations, but the
# duplicate line misleads readers into thinking both are needed).
# Profile for a user whose account was created through the Telegram bot
class ChatProfile(models.Model):
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    chat = models.PositiveIntegerField('채팅 아이디', default=0)  # Telegram chat id
    password = models.CharField('패스워드', max_length=50, default='')
    def __unicode__(self):
        return u'[{user}] {chat_id}'.format(user=self.user, chat_id=self.chat)
    class Meta:
        verbose_name = '알림(Telegram)'
        verbose_name_plural = '알림(Telegram) 목록'
# Per-user / per-chat keyword-watch (alarm) entries
class Alarm(models.Model):
    profile = models.ForeignKey(ChatProfile, on_delete=models.CASCADE, default=None, null=True, blank=True,
                                verbose_name='로그인')
    user = models.ForeignKey(User, on_delete=models.CASCADE, default=None, null=True, verbose_name='사용자')
    keyword = models.CharField('키워드', max_length=50, default='')
    price = models.PositiveIntegerField('가격', default=0)
    disabled = models.BooleanField('비활성화 여부', default=False)
    created = models.DateTimeField('등록일', auto_now_add=True)
    def save(self, *args, **kwargs):
        # Backfill `user` from the bot profile when only the profile is known.
        if self.profile and not self.user:
            self.user = self.profile.user
        super(Alarm, self).save(*args, **kwargs)
    class Meta:
        verbose_name = '알림'
        verbose_name_plural = '알림 목록'
# Collects the keywords users searched for, for later analysis
class SearchKeyword(models.Model):
    keyword = models.CharField('키워드', db_index=True, max_length=50, default='')
    created = models.DateTimeField('등록일', auto_now_add=True)
    class Meta:
        verbose_name = '검색 키워드'
        verbose_name_plural = '검색 키워드 목록'
class Advertise(models.Model):
    # A banner advertisement shown on the site.
    title = models.CharField('제목', max_length=50, default='')
    banner = models.ImageField('배너', upload_to='banner/')
    created = models.DateTimeField('등록일', auto_now_add=True)
    def __unicode__(self):
        return u'{title}'.format(title=self.title)
    class Meta:
        verbose_name = '광고'
        verbose_name_plural = '광고 목록'
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,915
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggobot/urls.py
|
# -*- coding: utf-8 -*-
"""maldives URL Configuration
"""
from django.conf.urls import url
from joonggobot import views
urlpatterns = [
    # Telegram webhook endpoints: push delivery and the polling fallback.
    url(r'webhook$', views.webhook, name='webhook'),
    url(r'webhook_polling$', views.webhook_polling, name='webhook_polling'),
]
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,916
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/scraper/checkers.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from dynamic_scraper.spiders.django_checker import DjangoChecker
from scrapy import Selector
from joonggo.models import Article
from scrapy.http import Request, FormRequest
class ArticleChecker(DjangoChecker):
    """DDS checker that re-visits an Article's URL to verify it still exists."""
    name = 'article_checker'
    def __init__(self, *args, **kwargs):
        # Wire the checker to its Article row and the source's scraper config.
        self._set_ref_object(Article, **kwargs)
        self.scraper = self.ref_object.source.scraper
        self.scrape_url = self.ref_object.url
        self.scheduler_runtime = self.ref_object.checker_runtime
        # NOTE(review): passing `self` as an extra positional arg mirrors the
        # django-dynamic-scraper example boilerplate - confirm against the
        # installed DDS version before changing.
        super(ArticleChecker, self).__init__(self, *args, **kwargs)
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,917
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggobot/joonggobot_main.py
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from django_pandas.io import read_frame
from joonggo.models import ChatProfile, Article, Alarm, Source
from django.contrib.auth.models import User
from django.db.models import Q
from functools import reduce
import operator
import telegram
import sys
import datetime
import re
class JoonggoBot:
    """Telegram chatbot front-end for the joonggo article database.

    Maps slash-style commands (see the `self.handler` dispatch table) onto
    search and alarm-management actions backed by the Django models.
    """
    WEBHOOK_URL = 'http://52.78.186.61/joonggobot/webhook_polling'
    # SECURITY(review): bot token hardcoded in the repository - move it to
    # settings/environment and rotate the token.
    TELEGRAM_TOKEN = '373562267:AAGVYqG7JFud4tCePUdq-Bkd-Y6-dZsP568'
    @staticmethod
    def get_token():
        return JoonggoBot.TELEGRAM_TOKEN
    @staticmethod
    def get_webhookurl():
        return JoonggoBot.WEBHOOK_URL
    def get_chat_profile(self, id):
        """Return the ChatProfile for a Telegram chat id, or None if unknown."""
        try:
            profile = ChatProfile.objects.get(chat=id)
        except ChatProfile.DoesNotExist:
            profile = None
        return profile
    def __init__(self):
        # NOTE(review): reload() / sys.setdefaultencoding() exist only on
        # Python 2 - this constructor would fail on Python 3.
        reload(sys)
        sys.setdefaultencoding('utf-8')
        self.token = JoonggoBot.TELEGRAM_TOKEN
        self.telegram_bot = telegram.Bot(JoonggoBot.TELEGRAM_TOKEN)
        # Command name -> handler method dispatch table (used by handle()).
        self.handler = {'start' : self.handle_start,
                        'stop': self.handle_stop,
                        'help': self.handle_help,
                        'search': self.handle_search,
                        'register_alarm': self.handle_add_alarm,
                        'list_alarm': self.handle_list_alarm,
                        'remove_alarm': self.handle_remove_alarm,
                        'password_alarm': self.handle_password_alarm,}
    def handle_start(self, id, message):
        """'start': create a bot-linked User/ChatProfile pair for this chat."""
        profile = self.get_chat_profile(id)
        if profile is None:
            name = u"bot_%s" % (id)
            user = User.objects.create_user(name, "", "")
            user.profile = ChatProfile.objects.create(user=user, chat=id)
            user.save()
        send_message = u"환영합니다.\n웹에서 알림 등록 시 아래 토큰을 활용해주세요\n%d" % (id)
        self.send_message(id, send_message)
    def handle_stop(self, id, message):
        """'stop': delete this chat's bot account and profile."""
        send_message = u"봇과의 연결을 종료합니다"
        profile = self.get_chat_profile(id)
        if profile is not None:
            profile.user.delete()
            profile.delete()
        self.send_message(id, send_message)
    def handle_help(self, id, message):
        """'help': send the command cheat sheet."""
        send_message = u"명령어 도움말\n\n"
        send_message += u"1.키워드 검색 예)\n\"아이폰7\"\n\n"
        send_message += u"2.알림 등록 예\n\"/알림등록 아이폰7\"\n\n"
        send_message += u"3.알림 확인 예\n\"/알림목록\"\n\n"
        send_message += u"4.알림 삭제 예\n\"/알림삭제 아이폰7\"\n\n"
        send_message += u"5.알림 암호 예\n\"/알림암호 *****\"\n\n"
        self.send_message(id, send_message)
    def handle_add_alarm(self, id, message):
        """Register a keyword alarm for this chat's profile."""
        keyword = message.split(u"/알림등록")[1].strip()
        profile = self.get_chat_profile(id)
        if profile is not None:
            alarm = Alarm.objects.create(profile=profile, keyword=keyword)
            alarm.save()
            send_message = u"%d 토큰에 알림을 등록하였습니다\n키워드=%s" % (id, keyword)
        else:
            send_message = u"%d 토큰의 사용자 정보가 존재하지 않습니다\n다시 봇을 시작해주세요" % (id)
        self.send_message(id, send_message)
    def handle_list_alarm(self, id, message):
        """List every alarm registered for this chat."""
        profile = self.get_chat_profile(id)
        if profile is None:
            send_message = u"%d 토큰의 사용자 정보가 존재하지 않습니다\n다시 봇을 시작해주세요" % (id)
        else:
            alarms = Alarm.objects.filter(profile=profile)
            send_message = u"알림등록 목록(%d) = %d 개\n\n" % (id, len(alarms))
            for alarm in alarms:
                send_message += u"등록 키워드 : %s\n" % (alarm.keyword)
                send_message += u"가격 기준 : %s\n\n" % (alarm.price)
        self.send_message(id, send_message)
    def handle_remove_alarm(self, id, message):
        """Remove one keyword alarm, or all alarms when no keyword is given."""
        profile = self.get_chat_profile(id)
        if profile is None:
            send_message = u"%d 토큰의 사용자 정보가 존재하지 않습니다\n다시 봇을 시작해주세요" % (id)
        else:
            keyword = message.split(u"/알림삭제")
            if len(keyword) < 1 or len(keyword[1]) < 1:
                Alarm.objects.filter(profile=profile).delete()
                send_message = u"%d 토큰의 모든 알림을 삭제하였습니다" % (id)
            else:
                Alarm.objects.filter(profile=profile, keyword=keyword[1].strip()).delete()
                send_message = u"%d 토큰의 \"%s\" 알림을 삭제하였습니다" % (id, keyword[1].strip())
        self.send_message(id, send_message)
    def handle_password_alarm(self, id, message):
        """Set the web-login password for this chat's bot account."""
        profile = self.get_chat_profile(id)
        if profile is None:
            send_message = u"%d 토큰의 사용자 정보가 존재하지 않습니다\n다시 봇을 시작해주세요" % (id)
        else:
            keyword = message.split(u"/알림암호")
            if len(keyword) < 1 or len(keyword[1]) < 1:
                send_message = u"%d 토큰의 암호를 지정하세요" % (id)
            else:
                profile.user.set_password(keyword[1].strip())
                profile.user.save()
                send_message = u"%d 토큰의 암호를 \'%s\' 로 설정하였습니다" % (id, keyword[1].strip())
        self.send_message(id, send_message)
    def handle_search(self, id, message):
        """Search recent (two-week) unsold articles matching every word in
        `message` and reply with up to 10 of the cheapest unique results."""
        end_date = datetime.date.today()  # today's date
        period = datetime.timedelta(days=13)
        start_date = end_date - period
        queryset = Article.objects.filter(created__gte=start_date).order_by('-created')
        # queryset = queryset.filter(title__contains=message, is_sold_out=False)
        message_list = message.split(' ')
        query = (Q(is_sold_out=False) & Q(survival_count__gte=1) & Q(price__gte=10000))
        for msg in message_list:
            query &= Q(title__contains=msg)
        queryset = queryset.filter(query)
        title_exclude = ['삽니다', '구합니다', '배터리']
        for t in title_exclude:
            queryset = queryset.exclude(title__contains=t)
        if queryset.count() > 0:
            article_data = read_frame(queryset,
                                      fieldnames=['title', 'price', 'url', 'created', 'source_id', 'uid'])
            # drop duplicate titles, keeping the cheapest listing of each
            article_data = article_data.sort_values('price', ascending=True).drop_duplicates('title')
            # price floor at 20% of the mean / ceiling at 3x the mean
            avg = article_data['price'].mean()
            article_data = article_data[article_data['price'] >= avg * 0.2]
            article_data = article_data[article_data['price'] < avg * 3]
            article_data = article_data[article_data['price'] % 100 == 0]
            article_data = article_data.reset_index(drop=True)
            item_list = article_data[:10]
            query_result = u"검색 결과 = %d 개\n\n" % (len(item_list))
            for index, row in item_list.iterrows():
                query_result += u"가격 : %s\n" % (row['price'])
                query_result += u"날짜 : %s\n" % (str(row['created']).split(".")[0])
                query_result += u"제목 : %s\n" % (' '.join(row['title'].split()))
                default_url = u"%s\n\n" % (row['url'])
                words = re.search(r"\[(.*)\]", row['source_id'])
                if words:
                    source = Source.objects.filter(name=words.group(0)[1:-1]).first()
                    if source is not None:
                        default_url = u"%s%s\n\n" % (source.mobile_base_url, row['uid'])
                query_result += default_url
        else:
            query_result = u"검색 결과가 존재하지 않습니다\n\n"
        self.send_message(id, query_result)
    def send_message(self, id, message):
        """Deliver `message` to the Telegram chat `id`."""
        self.telegram_bot.sendMessage(id, message)
    def handle(self, id, type, message):
        """Dispatch a parsed command `type` to its handler method."""
        if type in self.handler:
            self.handler[type](id, message)
        else:
            print("handler error")
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,918
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/tasks.py
|
# -*- coding: utf-8 -*-
from celery.task import task
from dynamic_scraper.utils.task_utils import TaskUtils
from joonggo.models import Source, Article
@task()
def run_spiders():
    """Celery task: launch both DDS article spiders for all configured Sources."""
    t = TaskUtils()
    t.run_spiders(Source, 'scraper', 'scraper_runtime', 'article_spider')
    t.run_spiders(Source, 'scraper', 'scraper_runtime', 'cetizen_article_spider')
@task()
def run_checkers():
    """Celery task: run the article existence checker over stored Articles."""
    t = TaskUtils()
    t.run_checkers(Article, 'source__scraper', 'checker_runtime', 'article_checker')
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,919
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/scraper/scraper_task.py
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
import os
import sys
import django
import threading
import time
import subprocess
import signal
class CrawlerThread(threading.Thread):
    """Repeatedly runs a scrapy spider for one Source until stop() is called.

    Each iteration shells out to `scrapy crawl`; successive runs are spaced
    at least `max_waiting` seconds apart.
    """
    def __init__(self, id, name, source):
        threading.Thread.__init__(self)
        self.id = id
        self.name = name
        self.source = source
        # Bug fix: renamed from self._stop - Python 3's Thread uses an internal
        # _stop() method, and shadowing it with an Event breaks thread teardown.
        self._stop_event = threading.Event()
    def run(self):
        default_spider = "article_spider"
        # Bug fix: use self.source; the bare name `source` resolved to the
        # module-level loop variable (always the *last* Source created in
        # __main__) instead of the one handed to this thread.
        if self.source.name.startswith(u'세티즌'):
            default_spider = "cetizen_article_spider"
        max_waiting = 60
        command = "scrapy crawl %s -a id=%d -a do_action=yes" % (default_spider, self.id)
        while not self.stopped():
            start_time = time.time()
            print("%s - %d start crawling\n" % (self.name, self.id))
            subprocess.call(command, shell=True)
            delta = max_waiting - (time.time() - start_time)
            if delta > 0:
                print("%s - %d waiting %d seconds\n" % (self.name, self.id, delta))
                time.sleep(delta)
    def stop(self):
        """Signal the crawl loop to exit after the current iteration."""
        self._stop_event.set()
    def stopped(self):
        """Return True once stop() has been requested."""
        return self._stop_event.is_set()
class GracefulKiller:
    """Turns SIGINT/SIGTERM into a polled `kill_now` flag for clean shutdown."""
    kill_now = False
    def __init__(self):
        # Route both termination signals through the same handler.
        for _sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(_sig, self.exit_gracefully)
    def exit_gracefully(self, signum, frame):
        # Only record the request; the main loop decides when to exit.
        self.kill_now = True
if __name__ == '__main__':
    # Bootstrap Django using the project path passed as argv[1], start one
    # crawler thread per Source, then wait for SIGINT/SIGTERM to stop them all.
    sys.path.append(sys.argv[1])
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maldives.settings.production")
    django.setup()
    from joonggo.models import Article, ArticleItem, Source
    threads = []
    sources = Source.objects.all()
    for source in sources:
        thread1 = CrawlerThread(source.id, "thread", source)
        thread1.start()
        threads.append(thread1)
    killer = GracefulKiller()
    while True:
        time.sleep(10)
        if killer.kill_now:
            print("Trying to kill ...")
            for t in threads:
                t.stop()
            for t in threads:
                t.join()
            break
    # Bug fix: this was a Python 2 print statement ('print "..."'), which is a
    # SyntaxError on Python 3 and inconsistent with the print() calls above.
    print("Exiting Crawler main")
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,920
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/admin.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from reversion.admin import VersionAdmin
from joonggo.models import Source, Article, ChatProfile, Advertise
class SourceAdmin(VersionAdmin):
    """Admin for Source rows with a clickable external-site link column."""
    list_display = ('name', 'category', 'url_')
    ordering = ('-id',)
    def url_(self, instance):
        # Render the URL as an anchor; allow_tags stops Django escaping it.
        return '<a href="{url}" target="_blank">{title}</a>'.format(
            url=instance.url, title=instance.url)
    # NOTE(review): allow_tags was removed in Django 2.0 - confirm the Django
    # version in use, or switch to format_html().
    url_.allow_tags = True
admin.site.register(Source, SourceAdmin)
class ArticleAdmin(VersionAdmin):
    """Admin for scraped Articles with a clickable link to the original post."""
    list_display = ('title', 'price', 'url_', 'created')
    ordering = ('-id', )
    def url_(self, instance):
        # Render the URL as an anchor; allow_tags stops Django escaping it.
        return '<a href="{url}" target="_blank">{title}</a>'.format(
            url=instance.url, title=instance.url)
    url_.allow_tags = True
admin.site.register(Article, ArticleAdmin)
class ChatProfileAdmin(VersionAdmin):
    """Admin listing of Telegram chat profiles."""
    list_display = ('user', 'chat')
    ordering = ('-id', )
admin.site.register(ChatProfile, ChatProfileAdmin)
class AdvertiseAdmin(VersionAdmin):
    """Admin listing of banner advertisements."""
    list_display = ('title', )
    ordering = ('-id', )
admin.site.register(Advertise, AdvertiseAdmin)
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,921
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/scraper/cetizen_spiders.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
from dynamic_scraper.spiders.django_spider import DjangoSpider
from scrapy.http import Request, FormRequest
from dynamic_scraper.utils import processors
from scrapy.loader.processors import Join
from scrapy.loader.processors import TakeFirst
from joonggo.models import Article, ArticleItem, Source
from scrapy import Selector
import json, logging
import scrapy
class CetizenArticleSpider(DjangoSpider):
    """DDS spider for the Cetizen used-market listing pages.

    Configuration (scraper, URLs, runtimes) comes from the referenced Source
    object; scraped rows are stored via Article/ArticleItem.
    """
    name = 'cetizen_article_spider'
    start_urls = [
        'http://market.cetizen.com/market.php?auc_sale=1',
    ]
    def __init__(self, *args, **kwargs):
        # Wire the spider to its Source row and the DDS scraper/runtime config.
        self._set_ref_object(Source, **kwargs)
        self.scraper = self.ref_object.scraper
        self.scrape_url = self.ref_object.url
        self.login_page = self.ref_object.login_url
        self.scheduler_runtime = self.ref_object.scraper_runtime
        self.scraped_obj_class = Article
        self.scraped_obj_item_class = ArticleItem
        # NOTE(review): extra `self` positional arg matches DDS boilerplate.
        super(CetizenArticleSpider, self).__init__(self, *args, **kwargs)
    def start_requests(self):
        """Build the initial page requests, substituting the DDS {page}
        placeholder into headers/body/cookies/form data."""
        index = 0
        start_urls = self.start_urls
        print('start_requests')
        for url in start_urls:
            self._set_meta_splash_args()
            kwargs = self.mp_request_kwargs.copy()
            if self.mp_form_data:
                form_data = self.mp_form_data.copy()
            else:
                form_data = None
            if 'headers' in kwargs:
                kwargs['headers'] = json.loads(json.dumps(kwargs['headers']).replace('{page}', str(self.pages[index])))
            if 'body' in kwargs:
                kwargs['body'] = kwargs['body'].replace('{page}', str(self.pages[index]))
            if 'cookies' in kwargs:
                kwargs['cookies'] = json.loads(json.dumps(kwargs['cookies']).replace('{page}', str(self.pages[index])))
            if form_data:
                form_data = json.loads(json.dumps(form_data).replace('{page}', str(self.pages[index])))
            if 'meta' not in kwargs:
                kwargs['meta'] = {}
            kwargs['meta']['page'] = index + 1
            rpt = self.scraper.get_main_page_rpt()
            self.dds_logger.info('')
            self.dds_logger.info(self.bcolors['BOLD'] +
                                 '======================================================================================' +
                                 self.bcolors['ENDC'])
            self.log("{es}{es2}Scraping data from page {page}.{ec}{ec}".format(
                page=index + 1, es=self.bcolors['BOLD'], es2=self.bcolors['HEADER'], ec=self.bcolors['ENDC']),
                logging.INFO)
            self.log("URL: {url}".format(url=url), logging.INFO)
            self.dds_logger.info(self.bcolors['BOLD'] +
                                 '======================================================================================' +
                                 self.bcolors['ENDC'])
            yield Request(url, callback=self.parse, method=rpt.method, dont_filter=rpt.dont_filter, **kwargs)
        print('end start_requests')
    def main_parse(self, response):
        """Follow each listing link on an index page to its detail page."""
        for sel in response.xpath("//span[@class=\"clr100\" or @class=\"clr01\"]/a"):
            link = sel.xpath('@href').extract()[0]
            full_url = u"http://market.cetizen.com" + link
            yield Request(full_url, callback=self.detail_parse)
    def detail_parse(self, response):
        """Extract the core item fields from a detail page.

        NOTE(review): currently only prints the fields - nothing is saved.
        """
        # unique id (item number)
        uid = response.xpath("//div/span[@class=\"p14 clr02 ls-0\"]/text()").re_first(r'물품번호\s*:\s*(\d+)')
        ## date
        date = response.xpath("//div/span/span[@class=\"p12 ls-0\"]/text()").re_first(r'\((.*)\)')
        # title
        title = response.xpath("//div/span[@class=\"p17 clr04\"]/text()").extract_first()
        # price
        price = response.xpath("//div/span[@class=\"clr03 p21\"]/text()").re_first(r'(.*)').replace(',', '')
        # content
        content = response.xpath("//div[@class=\"ln24 p14 clr01\"]//text()").extract()
        print(u"%s\n%s\n%s\n%s\n%s\n" % (uid, date, title, price, content))
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,922
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/scraper/naver_standalone_checker.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from dynamic_scraper.spiders.django_spider import DjangoSpider
from scrapy.http import Request
from joonggo.models import Article, ArticleItem, Source
from django.db.models import Q
from scrapy.http import Request, FormRequest
from scrapy import Selector
class NaverStandaloneChecker(DjangoSpider):
    """Logs into Naver, then walks surviving Articles one at a time to verify
    each post still exists, marking removed ones as sold out."""
    name = 'naver_standalone_checker'
    start_urls = [
        'https://nid.naver.com/nidlogin.login',
    ]
    def __init__(self, *args, **kwargs):
        # Wire the checker to its Source row and the DDS scraper/runtime config.
        self._set_ref_object(Source, **kwargs)
        self.scraper = self.ref_object.scraper
        self.scrape_url = self.ref_object.url
        self.login_page = self.ref_object.login_url
        self.scheduler_runtime = self.ref_object.scraper_runtime
        self.scraped_obj_class = Article
        self.scraped_obj_item_class = ArticleItem
        super(NaverStandaloneChecker, self).__init__(self, *args, **kwargs)
    def start_requests(self):
        """Kick off the run by requesting the Naver login page."""
        print(u'start_requests start')
        yield Request(url=self.login_page, callback=self.login, dont_filter=True)
        print(u'start_requests ends')
    def request_next_url(self):
        """Return a Request for the next unchecked Article, or None when done."""
        articles = Article.objects.filter(Q(source=self.ref_object) & Q(survival_count=1)).order_by("id")[:1]
        if len(articles) > 0:
            article = articles[0]
            #print(u"checking : %d %s %s %s\n" % (article.id, article.title, article.uid, article.url))
            return Request(article.url, callback=self.detail_parse, meta={'article': article})
        else:
            print(u"checking ended!\n")
            return None
    def detail_parse(self, response):
        """Mark the article alive or sold out depending on whether the page
        shows an error body, then queue the next check."""
        article = response.meta['article']
        error_content = response.xpath("//div[@class=\"error_content_body\"]/h2/text()").extract_first()
        if error_content is None:
            article.survival_count += 1
            print("content is valid!, keep going\n")
        else:
            article.survival_count = 0
            article.is_sold_out = True
            print("content was removed : %s\n" % (error_content))
        article.save()
        yield self.request_next_url()
    def login(self, response):
        """Submit the Naver login form."""
        print('naver standalone checker login try')
        # SECURITY(review): real Naver credentials hardcoded in the repo -
        # move them to settings/environment and rotate them.
        login_data = {'id': 'sep521', 'pw': 'sep521sep521'}
        return FormRequest.from_response(response,
                                         formdata=login_data,
                                         callback=self.check_login_response)
    def check_login_response(self, response):
        """Proceed with checking only if the login page shows no error element."""
        print('naver standalone checker response check')
        selector = Selector(response)
        error_element = selector.xpath('//div[@id="err_common"]')
        # No error element means the login succeeded.
        if len(error_element) == 0:
            print('login success!!')
            yield self.request_next_url()
        else:
            print('login failed, abort checking!!')
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,923
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/scraper/spiders.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
from dynamic_scraper.spiders.django_spider import DjangoSpider
from scrapy.http import Request, FormRequest
from dynamic_scraper.utils import processors
from scrapy.loader.processors import Join
from joonggo.models import Article, ArticleItem, Source
from scrapy.utils.spider import iterate_spider_output
from scrapy import Selector
import json, logging
class ArticleSpider(DjangoSpider):
    """django-dynamic-scraper spider for article list pages.

    Extends DjangoSpider with (a) a Naver login step injected as the
    first request and (b) per-page "{page}" placeholder substitution in
    request headers/body/cookies/form data.
    """
    name = 'article_spider'
    def __init__(self, *args, **kwargs):
        # Resolve the Source row (the DDS "reference object") and pull
        # the scraper configuration stored on it.
        self._set_ref_object(Source, **kwargs)
        self.scraper = self.ref_object.scraper
        self.scrape_url = self.ref_object.url
        self.login_page = self.ref_object.login_url
        self.scheduler_runtime = self.ref_object.scraper_runtime
        self.scraped_obj_class = Article
        self.scraped_obj_item_class = ArticleItem
        # NOTE(review): the extra leading `self` argument copies the
        # django-dynamic-scraper example spiders — confirm DjangoSpider
        # actually expects it.
        super(ArticleSpider, self).__init__(self, *args, **kwargs)
    def start_requests(self):
        index = 0
        # Prepend the Naver login URL so authentication happens before
        # any of the configured start_urls are fetched.
        start_urls = ['https://nid.naver.com/nidlogin.login'] + self.start_urls
        print('start_requests')
        for url in start_urls:
            if index == 0 and 'login' in url:
                yield Request(
                    url=self.login_page,
                    callback=self.login,
                    dont_filter=True
                )
                # `continue` skips the index increment, so the first real
                # URL still uses self.pages[0].
                continue
            self._set_meta_splash_args()
            # Work on copies so the DDS-level templates stay pristine.
            kwargs = self.mp_request_kwargs.copy()
            if self.mp_form_data:
                form_data = self.mp_form_data.copy()
            else:
                form_data = None
            # Substitute the "{page}" placeholder everywhere it may occur
            # (presumably self.pages holds the configured page tokens —
            # TODO confirm against DDS pagination settings).
            if 'headers' in kwargs:
                kwargs['headers'] = json.loads(json.dumps(kwargs['headers']).replace('{page}', str(self.pages[index])))
            if 'body' in kwargs:
                kwargs['body'] = kwargs['body'].replace('{page}', str(self.pages[index]))
            if 'cookies' in kwargs:
                kwargs['cookies'] = json.loads(json.dumps(kwargs['cookies']).replace('{page}', str(self.pages[index])))
            if form_data:
                form_data = json.loads(json.dumps(form_data).replace('{page}', str(self.pages[index])))
            if 'meta' not in kwargs:
                kwargs['meta'] = {}
            kwargs['meta']['page'] = index + 1
            rpt = self.scraper.get_main_page_rpt()
            self.dds_logger.info('')
            self.dds_logger.info(self.bcolors['BOLD'] +
                '======================================================================================' +
                self.bcolors['ENDC'])
            self.log("{es}{es2}Scraping data from page {page}.{ec}{ec}".format(
                page=index + 1, es=self.bcolors['BOLD'], es2=self.bcolors['HEADER'], ec=self.bcolors['ENDC']),
                logging.INFO)
            self.log("URL: {url}".format(url=url), logging.INFO)
            self.dds_logger.info(self.bcolors['BOLD'] +
                '======================================================================================' +
                self.bcolors['ENDC'])
            # NOTE(review): this prints once per URL, not once at the end.
            print('end start_requests')
            index += 1
            # Request type comes from the DDS request-page-type config:
            # 'R' = plain GET/Request, otherwise a form POST.
            if rpt.request_type == 'R':
                yield Request(url, callback=self.parse, method=rpt.method, dont_filter=rpt.dont_filter, **kwargs)
            else:
                yield FormRequest(url, callback=self.parse, method=rpt.method, formdata=form_data,
                                  dont_filter=rpt.dont_filter, **kwargs)
    def login(self, response):
        print('login try')
        # NOTE(review): hardcoded credentials — move to settings/env.
        login_data = {'id': 'sep521', 'pw': 'sep521sep521'}
        return FormRequest.from_response(response,
                                         formdata=login_data,
                                         callback=self.check_login_response)
    def check_login_response(self, response):
        print('check_login_response')
        selector = Selector(response)
        error_element = selector.xpath('//div[@id="err_common"]')
        # No error element on the page means the login succeeded.
        if len(error_element) == 0:
            self.log('Login sucesss!')
        else:
            self.log("Login fail")
    def _get_processors(self, procs_str):
        """Resolve a comma-separated processor list into callables.

        Always starts with Join() + string_strip, then looks each name up
        first in dynamic_scraper.utils.processors, then in every module
        listed under conf['CUSTOM_PROCESSORS'].
        """
        procs = [Join(), processors.string_strip, ]
        if not procs_str:
            return procs
        procs_tmp = list(procs_str.split(','))
        for p in procs_tmp:
            p = p.strip()
            added = False
            if hasattr(processors, p):
                procs.append(getattr(processors, p))
                added = True
            for cp_path in self.conf['CUSTOM_PROCESSORS']:
                try:
                    custom_processors = importlib.import_module(cp_path)
                    if hasattr(custom_processors, p):
                        procs.append(getattr(custom_processors, p))
                        added = True
                except ImportError:
                    # Broken custom-processor module: skip, keep looking.
                    pass
            if not added:
                self.log("Processor '{p}' is not defined!".format(p=p), logging.ERROR)
        procs = tuple(procs)
        return procs
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,924
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/templatetags/to_pc_url.py
|
# -*- coding: utf-8 -*-
from django import template
import re
register = template.Library()
@register.filter
def to_pc_url(value):
    """Rewrite a mobile ("/m.") URL into its desktop equivalent."""
    # The mobile host differs from the desktop host only by the "m." prefix.
    mobile_marker, desktop_marker = "/m.", "/"
    return value.replace(mobile_marker, desktop_marker)
@register.filter
def covert_to_write(value):
    """Turn a Naver cafe article URL into its mobile write-form URL.

    Pulls clubid and menuid out of *value*; when both query parameters
    are present the ArticleWrite URL is built, otherwise *value* is
    returned untouched.
    """
    match = re.search(r'clubid=(\d*).*menuid=(\d*)', value)
    if match is None:
        return value
    clubid, menuid = match.groups()
    return u'http://m.cafe.naver.com/ArticleWrite.nhn?clubid={clubid}&menuid={menuid}&m=write'.format(
        clubid=clubid, menuid=menuid
    )
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,925
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggobot/apps.py
|
from django.apps import AppConfig
class JoonggobotConfig(AppConfig):
    """App config for the Telegram bot app."""
    name = 'joonggobot'
    def ready(self):
        # Import for side effects only: registers the post_save signal
        # handlers defined in joonggobot.signals.
        import joonggobot.signals
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,926
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/scraper/pipelines.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from builtins import str
from builtins import object
import logging
import re
from django.db.utils import IntegrityError
from scrapy.exceptions import DropItem
from dynamic_scraper.models import SchedulerRuntime
class DjangoWriterPipeline(object):
    """Persist scraped ArticleItems to the Django DB.

    Also sanity-checks the crawled price field against a price parsed
    from the article title ("15000원", "5만원", "5만"); when they
    disagree, the title-derived price wins and the mismatch is logged.
    """

    # Price written in the title. NOTE: the previous pattern used a
    # character class `[원|만원]`, which wrongly also matched a literal
    # '|' and any ordering of the characters; the alternation below only
    # accepts the real suffixes. Commas are stripped before matching.
    _PRICE_RE = re.compile(u'(\\d+(?:만원|만|원))')

    def process_item(self, item, spider):
        """Save *item* to the DB; raise DropItem on integrity errors.

        Writes only when the spider runs with DO_ACTION (the DDS
        dry-run flag); the item is always returned so later pipeline
        stages still receive it.
        """
        if spider.conf['DO_ACTION']:
            try:
                item['source'] = spider.ref_object
                item['price'] = self.adjust_price(item, spider)
                item.save()
                spider.action_successful = True
                dds_id_str = str(item._dds_item_page) + '-' + str(item._dds_item_id)
                spider.log("{cs}Item {id} saved to Django DB.{ce}".format(
                    id=dds_id_str,
                    cs=spider.bcolors['OK'],
                    ce=spider.bcolors['ENDC']), logging.INFO)
            except IntegrityError as e:
                spider.log(str(e), logging.ERROR)
                raise DropItem("Missing attribute.")
        return item

    def adjust_price(self, item, spider):
        """Return the price string to store for *item*.

        If a positive price can be parsed from the title it replaces the
        crawled price; otherwise the crawled price is kept as-is.
        """
        new_price = 0
        price_candidate = []
        # Only the title is searched today; kept as a list so more
        # fields (e.g. content) can be added later.
        for target in [item['title']]:
            price_candidate += self._PRICE_RE.findall(target.replace(',', ''))
        if price_candidate:
            try:
                # "5만원" -> "50000원" -> 50000; "15000원" -> 15000.
                new_price = int(price_candidate[0].replace(u'만', u'0000').replace(u'원', u''))
            except Exception:
                # Any parse failure falls back to the crawled price.
                new_price = 0
        price = str(new_price) if new_price > 0 else item['price']
        if price != item['price']:
            spider.log("price mismatch, {content_price} != {crawl_price}".format(
                content_price=price, crawl_price=item['price']), logging.ERROR)
        return price
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,927
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/utils.py
|
# -*- coding: utf-8 -*-
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
def paginate_list(request, object_list):
    """Paginate *object_list* using the ?p= page number from *request*.

    Returns a django Page object augmented with section-navigation
    attributes (a "section" is a window of page_per_section page links):
    previous_page_section / next_page_section (first page number of the
    neighbouring sections), has_previous_section / has_next_section, and
    page_range covering exactly the current section.
    """
    if 'p' in request.GET:
        page = request.GET['p']
    else:
        page = 1
    per_page = 40
    page_per_section = 5  # how many page links to render at once
    paginator = Paginator(object_list, per_page)
    try:
        page_objects = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        page = 1
        page_objects = paginator.page(page)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        page_objects = paginator.page(paginator.num_pages)
    page = int(page)
    # First page number of the section containing the current page.
    start_page_section_num = int((page - 1) / page_per_section) * page_per_section + 1
    page_objects.previous_page_section = start_page_section_num - page_per_section
    if page_objects.previous_page_section < 0:
        # Already in the first section: clamp and hide the "prev" link.
        page_objects.previous_page_section = 0
        page_objects.has_previous_section = False
    else:
        page_objects.has_previous_section = True
    page_objects.next_page_section = start_page_section_num + page_per_section
    if page_objects.next_page_section >= page_objects.paginator.num_pages:
        # Last section: clamp so page_range below ends at the last page.
        page_objects.next_page_section = page_objects.paginator.num_pages + 1
        page_objects.has_next_section = False
    else:
        page_objects.has_next_section = True
    page_objects.page_range = range(start_page_section_num, page_objects.next_page_section)
    return page_objects
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,928
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/scraper/cetizen_standalone_checker.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from dynamic_scraper.spiders.django_spider import DjangoSpider
from scrapy.http import Request
from joonggo.models import Article, ArticleItem, Source
from django.db.models import Q
import time
class CetizenStandaloneChecker(DjangoSpider):
    """Standalone liveness checker for Cetizen market articles.

    Walks Article rows for this source one at a time (each parsed
    response chains the next request) and compares the item number on
    the page against the stored uid; a mismatch marks the article as
    sold out.
    """
    name = 'cetizen_standalone_checker'
    # Placeholder start URL; real requests are built from Article rows.
    start_urls = [
        'http://market.cetizen.com/market.php?auc_sale=1',
    ]
    def __init__(self, *args, **kwargs):
        # Resolve the Source row (the DDS "reference object") from the
        # spider kwargs and wire up the scraper config stored on it.
        self._set_ref_object(Source, **kwargs)
        self.scraper = self.ref_object.scraper
        self.scrape_url = self.ref_object.url
        self.login_page = self.ref_object.login_url
        self.scheduler_runtime = self.ref_object.scraper_runtime
        self.scraped_obj_class = Article
        self.scraped_obj_item_class = ArticleItem
        # NOTE(review): the extra leading `self` argument copies the
        # django-dynamic-scraper example spiders — confirm DjangoSpider
        # actually expects it.
        super(CetizenStandaloneChecker, self).__init__(self, *args, **kwargs)
    def request_next_url(self):
        # Oldest article still pending a check (survival_count == 1) for
        # this source; returns None once the queue is exhausted.
        articles = Article.objects.filter(Q(source=self.ref_object) & Q(survival_count=1)).order_by("id")[:1]
        if len(articles) > 0:
            article = articles[0]
            #print(u"checking : %d %s %s %s\n" % (article.id, article.title, article.uid, article.url))
            return Request(article.url, callback=self.detail_parse, meta={'article': article})
        else:
            print(u"checking ended!\n")
            return None
    def start_requests(self):
        print(u'start_requests start')
        # NOTE(review): yields None when the queue is already empty —
        # verify scrapy tolerates a None value from start_requests.
        yield self.request_next_url()
        print(u'start_requests ends')
    def detail_parse(self, response):
        """Compare the on-page item number against the stored uid."""
        # Unique id / item number ("물품번호 : NNN") shown on the page.
        uid = response.xpath("//div/span[@class=\"p14 clr02 ls-0\"]/text()").re_first(r'물품번호\s*:\s*(\d+)')
        article = response.meta['article']
        print(u"title : %s, uid : %s" % (article.title, article.uid))
        print(u"response url : %s," % (response.url))
        # A removed/redirected listing shows no (or a different) number.
        if uid is None or uid not in article.uid:
            print(u"%s, %s mismatch\n%s" % (article.uid, uid, article.url))
            article.survival_count = 0
            article.is_sold_out = True
        else:
            print(u"%s, %s match\n%s" % (article.uid, uid, article.url))
            article.survival_count += 1
        article.save()
        # Chain the next article check (None once the queue is empty).
        yield self.request_next_url()
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,929
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggo/scraper/settings.py
|
# -*- coding: utf-8 -*-
import os
import sys
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# Scrapy runs outside manage.py, so point Django at the settings module
# before any ORM import happens.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maldives.settings.production") # Changed in DDS v.0.3
# sys.path.insert(0, os.path.join(PROJECT_ROOT, "../../..")) # only for example_project
BOT_NAME = 'maldives'
SPIDER_MODULES = ['dynamic_scraper.spiders', 'joonggo.scraper']
USER_AGENT = '%s/%s' % (BOT_NAME, '1.0')
# Scrapy 0.20+
# Pipeline order: DDS image download first, then DDS validation, then
# this project's DB writer (joonggo.scraper.pipelines.DjangoWriterPipeline).
ITEM_PIPELINES = {
    'dynamic_scraper.pipelines.DjangoImagesPipeline': 200,
    'dynamic_scraper.pipelines.ValidationPipeline': 400,
    'joonggo.scraper.pipelines.DjangoWriterPipeline': 800,
}
# Scraped thumbnails land under the project's static dir.
IMAGES_STORE = os.path.join(PROJECT_ROOT, '../static/thumbnails')
IMAGES_THUMBS = {
    'medium': (50, 50),
    'small': (25, 25),
}
# django-dynamic-scraper knobs (image storage format + scraper logging).
DSCRAPER_IMAGES_STORE_FORMAT = 'ALL'
DSCRAPER_LOG_ENABLED = False
DSCRAPER_LOG_LEVEL = 'ERROR'
DSCRAPER_LOG_LIMIT = 5
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,930
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggobot/views.py
|
import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from joonggobot.joonggobot_main import JoonggoBot
import telegram
@api_view(['POST'])
@csrf_exempt
def webhook(request):
    # Placeholder endpoint for Telegram's push-style webhook; the real
    # dispatching lives in webhook_polling below.
    return HttpResponse('Hello Maldives bot, webhook!')
@api_view(['POST'])
@csrf_exempt
def webhook_polling(request):
    """Receive a polled Telegram update and dispatch it to JoonggoBot.

    Expects a JSON body with 'id' (chat id), 'type' (command name) and
    'text'; echoes the payload back as the response.
    """
    if request.method == 'POST':
        bot = JoonggoBot()
        bot.handle(request.data['id'], request.data['type'], request.data['text'])
        return HttpResponse(json.dumps(request.data), content_type='application/json')
    else:
        # NOTE(review): unreachable — @api_view(['POST']) already rejects
        # non-POST requests with 405 before this view body runs.
        return HttpResponse('Hello Maldives bot, webhook!')
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,931
|
kathar0s/maldives
|
refs/heads/develop
|
/maldives/settings/production.py
|
# -*- coding: utf-8 -*-
"""
Django settings for maldives project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import djcelery
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Proejct Root
PROJECT_DIR = os.path.abspath(os.path.join(BASE_DIR, '..'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ix^)at8hb#ijnd5@1@1=as0o7v2tx)oel3-&)tvcasla7-s%s#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '*']
DEFAULT_FROM_EMAIL = 'kathar0s.dev@gmail.com'
ADMINS = (
('kathar0s', 'kathar0s.dev@gmail.com'),
)
SITE_ID = 1
# Application definition
PROJECT_APPS = [
'joonggo',
'joonggobot',
]
THIRD_PARTY_APPS = [
'dynamic_scraper',
'kombu.transport.django',
'djcelery',
'django_filters',
'reversion',
'rest_framework',
'rest_framework_swagger'
]
INSTALLED_APPS = PROJECT_APPS + THIRD_PARTY_APPS + [
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.humanize',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.postgres',
]
MIDDLEWARE = [
'django.middleware.gzip.GZipMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'reversion.middleware.RevisionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.common.BrokenLinkEmailsMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'maldives.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.csrf', # necessary for csrf protection
],
'debug': True
},
},
]
WSGI_APPLICATION = 'maldives.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'maldives',
'USER': 'maldives',
'PASSWORD': 'havefun2mrow',
'HOST': '',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ko-KR'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/s/'
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
STATICFILES_DIRS = [
]
MEDIA_URL = '/m/'
MEDIA_ROOT = '/mnt/data/media/'
# For Crawler Scheduling
djcelery.setup_loader()
# Celery settings
BROKER_URL = 'amqp://guest:guest@localhost//'
BROKER_TRANSPORT = "django"
#: Only add pickle to this list if your broker is secured
#: from unwanted access (see userguide/security.html)
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
# REST_FRAMEWORK 설정
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',)
}
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,932
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggobot/signals.py
|
# -*- coding: utf-8 -*-
from django.db.models.signals import post_save
from django.dispatch import receiver
from joonggo.models import Article, Alarm
from joonggobot.joonggobot_main import JoonggoBot
@receiver(post_save, sender=Article)
def create_crawler_item(sender, instance, created, **kwargs):
    """post_save hook: push a Telegram alert for every matching alarm.

    For a newly created Article, each enabled Alarm whose whitespace-
    separated keywords ALL occur in title+content+tags gets a message
    with the price, a whitespace-normalized title and the mobile URL.
    """
    if created:
        for alarm in Alarm.objects.filter(disabled=False):
            keyword_list = alarm.keyword.split()
            # NOTE(review): assumes title/content/tags are never None —
            # a null field would raise TypeError here; confirm the model.
            full_contents = instance.title + ' ' + instance.content + ' ' + instance.tags
            if all(keyword in full_contents for keyword in keyword_list):
                # "User article registration alert" message (Korean).
                send_message = u"사용자 게시글 등록 알림\n"
                send_message += u"가격 : %s\n" % (instance.price)
                send_message += u"제목 : %s\n" % (' '.join(instance.title.split()))
                send_message += u"%s%s\n" % (instance.source.mobile_base_url, instance.uid)
                # NOTE(review): builds a fresh JoonggoBot per matching
                # alarm — could be hoisted if construction is costly.
                bot = JoonggoBot()
                bot.send_message(alarm.profile.chat, send_message)
    else:
        pass
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,933
|
kathar0s/maldives
|
refs/heads/develop
|
/joonggobot/polling_server.py
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Simple Bot to reply to Telegram messages
# This program is dedicated to the public domain under the CC0 license.
"""
This Bot uses the Updater class to handle the bot.
First, a few handler functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Basic Echobot example, repeats messages.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import json
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
import requests
import sys
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def sendToJoonggodives(message):
    """Forward a Telegram update *message* (dict) to the Django webhook.

    Fire-and-forget: the HTTP response body is intentionally ignored
    (the previous `response` local was unused).
    """
    # NOTE(review): hardcoded host — should come from config/env.
    url = 'http://52.78.186.61/joonggobot/webhook_polling'
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    cookies = {'webhook_id': 'test'}
    # Timeout added so a hung webhook cannot stall the polling loop forever.
    requests.post(url, data=json.dumps(message), headers=headers,
                  cookies=cookies, timeout=10)
def start(bot, update):
    # Each handler below forwards the update to the Django webhook with a
    # 'type' tag telling JoonggoBot which command to run.
    sendToJoonggodives({'type' : "start", 'id' : update.message.chat_id, 'text' : update.message.text})
def stop(bot, update):
    sendToJoonggodives({'type' : "stop", 'id' : update.message.chat_id, 'text' : update.message.text})
def help(bot, update):
    # NOTE(review): shadows the builtin help(); harmless here but worth renaming.
    sendToJoonggodives({'type': "help", 'id': update.message.chat_id, 'text': update.message.text})
def search(bot, update):
    # Fallback handler for plain (non-command) text messages.
    sendToJoonggodives({'type': "search", 'id': update.message.chat_id, 'text': update.message.text})
def add_alarm(bot, update):
    sendToJoonggodives({'type': "register_alarm", 'id': update.message.chat_id, 'text': update.message.text})
def list_alarm(bot, update):
    sendToJoonggodives({'type': "list_alarm", 'id': update.message.chat_id, 'text': update.message.text})
def remove_alarm(bot, update):
    sendToJoonggodives({'type': "remove_alarm", 'id': update.message.chat_id, 'text': update.message.text})
def password_alarm(bot, update):
    sendToJoonggodives({'type': "password_alarm", 'id': update.message.chat_id, 'text': update.message.text})
def error(bot, update, error):
    # NOTE(review): the `error` parameter shadows this function's own name,
    # and logger.warn is deprecated in favor of logger.warning.
    logger.warn('Update "%s" caused error "%s"' % (update, error))
def main():
    """Start the Telegram long-polling bot and register command handlers."""
    # Python 2 only: re-expose setdefaultencoding and force UTF-8 so the
    # Korean command names below work. reload()/setdefaultencoding do not
    # exist like this in Python 3 — this script is py2-bound.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    # Create the EventHandler and pass it your bot's token.
    # NOTE(review): hardcoded bot token — move to config/env and rotate it.
    updater = Updater('373562267:AAGVYqG7JFud4tCePUdq-Bkd-Y6-dZsP568')
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # on different commands - answer in Telegram
    # (Korean commands: 도움말=help, 알림등록=add alarm, 알림목록=list alarms,
    #  알림삭제=remove alarm, 알림암호=alarm password)
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("stop", stop))
    dp.add_handler(CommandHandler(u"도움말", help))
    dp.add_handler(CommandHandler(u"알림등록", add_alarm))
    dp.add_handler(CommandHandler(u"알림목록", list_alarm))
    dp.add_handler(CommandHandler(u"알림삭제", remove_alarm))
    dp.add_handler(CommandHandler(u"알림암호", password_alarm))
    # Any plain text message is treated as a search query.
    dp.add_handler(MessageHandler(Filters.text, search))
    # on noncommand i.e message - echo the message on Telegram
    #dp.add_handler(MessageHandler(Filters.text, echo))
    # log all errors
    dp.add_error_handler(error)
    # Start the Bot
    updater.start_polling()
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
if __name__ == '__main__':
    main()
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,934
|
kathar0s/maldives
|
refs/heads/develop
|
/api/views.py
|
# -*- coding: utf-8 -*-
import collections
from collections import OrderedDict
import json
import datetime
from django.contrib.auth import authenticate, login as django_login
from django.db.models import Avg, Min, Max, Q
from django.http import JsonResponse
from django_pandas.io import read_frame
from rest_framework import viewsets, filters
from rest_framework.decorators import list_route
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from joonggo.models import Article, Alarm
from joonggo.serializers import ArticleSerializer, AlarmSerializer
class PaginationClass(PageNumberPagination):
    """Page-number pagination: 10 items per page, client may override via ?limit=."""
    page_size = 10
    page_size_query_param = 'limit'
class ArticleViewSet(viewsets.ModelViewSet):
    """Read/write API over Article with a keyword search endpoint.

    `search` filters live, on-sale articles by keyword, derives a price
    range around the average, and attaches it to the paginated response
    under the 'info' key.
    """
    queryset = Article.objects.all().select_related('source')
    serializer_class = ArticleSerializer
    pagination_class = PaginationClass
    # Default for the price-range info attached to search responses.
    # NOTE(review): this is a mutable class attribute shared by all
    # instances; `search` always overwrites it per request, but it should
    # ideally live on the instance.
    info = {}

    # Handle keyword search requests (GET /article/search/?q=...).
    @list_route()
    def search(self, request):
        get = request.GET.copy()
        if 'q' in get and get['q'] != '':
            query = get['q']
            # template_data = retrieve_item(query)
            queryset = self.get_queryset()
            # Only on-sale items that still exist (survival_count >= 1)
            # and cost at least 10,000 won; every whitespace-separated
            # keyword must appear in the title.
            # queryset = queryset.filter(is_sold_out=False, survival_count__gte=1, price__gte=10000,
            #                            title__icontains=query).order_by('price')
            qs = (Q(is_sold_out=False) & Q(survival_count__gte=1) & Q(price__gte=10000))
            for kw in query.split(' '):
                qs &= Q(title__icontains=kw)
            queryset = queryset.filter(qs).order_by('price')
            # Only compute the average price when there were any hits.
            if queryset.count() > 0:
                result = queryset.aggregate(avg_price=Avg('price'))
                avg_price = result['avg_price']
                # The search results can be noisy, so drop outliers:
                # prices below 20% of the average and above 3x the average.
                queryset = queryset.filter(price__gte=avg_price * 0.2, price__lt=avg_price * 3)
                # Min/max price over the remaining results.
                result = queryset.aggregate(max_price=Max('price'), min_price=Min('price'))
                self.info = result
            else:
                # Bug fix: the original assigned a dead local (`info = None`),
                # leaving stale self.info from a previous search in the response.
                self.info = None
            page = self.paginate_queryset(queryset)
            if page is not None:
                serializer = self.get_serializer(page, many=True)
                return Response(OrderedDict([
                    ('count', self.paginator.page.paginator.count),
                    ('next', self.paginator.get_next_link()),
                    ('previous', self.paginator.get_previous_link()),
                    ('info', self.info),
                    ('results', serializer.data)
                ]))
            # Pagination disabled: return everything in one response.
            serializer = self.get_serializer(queryset, many=True)
            return Response(serializer.data)
        else:
            template_data = {
                'rsltCd': 'N',
                'rsltNsg': '조회 조건 없음'
            }
            return Response(template_data)

    # Price-trend endpoint (GET /article/trend/?q=...); returns None body
    # (HTTP 500) when 'q' is missing, matching the original behavior.
    @list_route()
    def trend(self, request):
        get = request.GET.copy()
        if 'q' in get and get['q'] != '':
            query = get['q']
            return Response(retrieve_item(query))
class AlarmViewSet(viewsets.ModelViewSet):
    """CRUD API over Alarm records, filterable by profile, id, chat and user."""
    queryset = Alarm.objects.all().select_related('profile').order_by('id')
    serializer_class = AlarmSerializer
    pagination_class = PaginationClass
    filter_backends = (filters.DjangoFilterBackend, )
    filter_fields = ('profile', 'id', 'profile__chat', 'profile__user')
def retrieve_item(keyword):
    """Build price-trend data for *keyword* over the last two weeks.

    Returns a dict with result code plus daily average/minimum price
    series and overall min/max price, or a "no data" payload when
    nothing matches.
    """
    # Only look at the most recent two weeks of data.
    end_date = datetime.date.today()  # today's date
    period = datetime.timedelta(days=13)
    start_date = end_date - period
    queryset = Article.objects.filter(created__gte=start_date).order_by('-created')
    # queryset = Article.objects.all()
    queryset = queryset.filter(is_sold_out=False, survival_count__gte=1, price__gte=10000, title__icontains=keyword)
    # Exclude non-sale posts ("buying", "looking for") and battery listings.
    title_exclude = ['삽니다', '구합니다', '배터리']
    exclude_query = Q(title__icontains=title_exclude[0])
    for t in title_exclude[1:]:
        exclude_query = exclude_query | Q(title__icontains=t)
    queryset = queryset.exclude(exclude_query)
    if queryset.count() > 0:
        df_article_data = read_frame(queryset,
                                     fieldnames=['id', 'uid', 'title', 'price', 'url', 'created', 'source_id'])
        # Deduplicate by title, keeping the cheapest listing.
        df_article_data = df_article_data.sort_values('price', ascending=True).drop_duplicates('title')
        # Normalize date formats and derive the trend-date bucket (MM-DD).
        df_article_data['trend_date'] = df_article_data['created'].apply(lambda x: x.strftime('%m-%d'))
        df_article_data['created'] = df_article_data['created'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
        # Outlier trimming: keep prices within [20% of mean, 3x mean],
        # and only round prices (multiples of 100 won).
        avg_price = df_article_data['price'].mean()
        df_article_data = df_article_data[df_article_data['price'] >= avg_price * 0.2]
        df_article_data = df_article_data[df_article_data['price'] < avg_price * 3]
        df_article_data = df_article_data[df_article_data['price'] % 100 == 0]
        df_article_data = df_article_data.reset_index(drop=True)
        # Overall minimum / maximum price.
        min_price = df_article_data['price'].min()
        max_price = df_article_data['price'].max()
        # Per-day minimum and average price.
        avg_daily = df_article_data.groupby(['trend_date'])['price'].mean()
        min_daily = df_article_data.groupby(['trend_date'])['price'].min()
        # Convert the daily series to ordered dicts keyed by date.
        # df_article_data = df_article_data.T
        # dict_article_data = df_article_data.to_dict()
        # dict_article_data = collections.OrderedDict(sorted(dict_article_data.items()))
        # dict_article_data = check_key_type(dict_article_data)
        dict_avg_daily = avg_daily.to_dict()
        dict_avg_daily = collections.OrderedDict(sorted(dict_avg_daily.items()))
        dict_min_daily = min_daily.to_dict()
        dict_min_daily = collections.OrderedDict(sorted(dict_min_daily.items()))
        # Flatten to parallel lists for the chart front-end.
        list_avg_date = list()
        list_avg_price = list()
        list_min_price = list()
        for key in dict_avg_daily.keys():
            list_avg_date.append(key)
            list_avg_price.append(dict_avg_daily[key])
        for key in dict_min_daily.keys():
            list_min_price.append(dict_min_daily[key])
        content = {
            'rsltCd': 'Y',
            'rsltNsg': u'정상',
            # 'article_data': dict_article_data,  # raw listing data (disabled)
            'trend_day': list_avg_date,       # dates for the avg/min series
            'trend_avg_price': list_avg_price,  # average-price trend
            'trend_min_price': list_min_price,  # minimum-price trend
            'min_price': min_price,           # overall minimum price
            'max_price': max_price            # overall maximum price
        }
    else:
        content = {'rsltCd': 'N', 'rsltNsg': u'데이터가 없음'}
    return content
# dictionary key type을 String으로 변환
def check_key_type(dict):
    """Convert every non-string key of *dict* to a string, in place.

    Each non-str key is replaced by str(key), falling back to repr(key)
    if str() raises; the (mutated) dict is also returned.
    """
    # Iterate over a snapshot of the keys: deleting/adding entries while
    # iterating dict.keys() directly raises RuntimeError in Python 3.
    for key in list(dict.keys()):
        if type(key) is not str:
            try:
                dict[str(key)] = dict[key]
            except Exception:
                try:
                    dict[repr(key)] = dict[key]
                except Exception:
                    pass
            del dict[key]
    return dict
def login(request):
    """JSON login endpoint for bot accounts.

    Expects POST fields `id` and `password`, authenticates against the
    "bot_<id>" username and returns a JSON result payload. Non-POST
    requests fall through and return None, as before.
    """
    if request.method == 'POST':
        uname = request.POST.get('id', '')
        pwd = request.POST.get('password', '')
        account = authenticate(username='bot_{username}'.format(username=uname), password=pwd)
        payload = {
            'error': True
        }
        if account is None:
            payload['message'] = 'You messed up'
        elif not account.is_active:
            payload['message'] = 'Your id is disabled'
        else:
            django_login(request, account)
            payload['user'] = {
                'id': account.id,
                'username': account.username,
                'profile': {
                    'id': account.chatprofile.id
                }
            }
            payload['error'] = False
            payload['message'] = 'You"re logged in'
        return JsonResponse(payload)
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,409,935
|
kathar0s/maldives
|
refs/heads/develop
|
/api/urls.py
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from api.views import ArticleViewSet, AlarmViewSet, login
from rest_framework import routers

# DRF router auto-generates list/detail routes for both viewsets.
router = routers.DefaultRouter()
router.register(r'article', ArticleViewSet)
router.register(r'alarm', AlarmViewSet)

urlpatterns = [
    url(r'^', include(router.urls)),
    # Plain function view (not a viewset), so it is wired up separately.
    url(r'^login/$', login, name='login')
]
|
{"/joonggo/serializers.py": ["/joonggo/models.py"], "/api/views.py": ["/joonggo/models.py", "/joonggo/serializers.py"], "/joonggobot/joonggobot_main.py": ["/joonggo/models.py"], "/joonggobot/signals.py": ["/joonggo/models.py", "/joonggobot/joonggobot_main.py"], "/joonggo/views.py": ["/joonggo/models.py"], "/joonggo/scraper/checkers.py": ["/joonggo/models.py"], "/joonggo/tasks.py": ["/joonggo/models.py"], "/joonggo/admin.py": ["/joonggo/models.py"], "/joonggo/scraper/cetizen_spiders.py": ["/joonggo/models.py"], "/joonggo/scraper/naver_standalone_checker.py": ["/joonggo/models.py"], "/joonggo/scraper/spiders.py": ["/joonggo/models.py"], "/joonggobot/apps.py": ["/joonggobot/signals.py"], "/joonggo/scraper/cetizen_standalone_checker.py": ["/joonggo/models.py"], "/joonggobot/views.py": ["/joonggobot/joonggobot_main.py"], "/api/urls.py": ["/api/views.py"]}
|
19,418,957
|
chency820/face_patch_mnn
|
refs/heads/master
|
/andy/face_patch_mnn-main/test_file_name.py
|
data_root_path = ''
path = "S129/002/S129_002_00000011.png"
base_path = path[:-12]  # everything up to the 8-digit frame number + ".png"
# if neutral expression included:
# path_neu = data_root_path + base_path + '00000001.png'
path_num_part = path[-12:-4]  # the zero-padded frame number, e.g. "00000011"
print(len(str(int(path_num_part))))


def _frame_path(offset):
    """Path of the frame *offset* steps before the current one, zero-padded to 8 digits."""
    frame = int(path_num_part) - offset
    return data_root_path + base_path + '0' * (8 - len(str(frame))) + str(frame) + '.png'


path_last3 = _frame_path(2)
path_last2 = _frame_path(1)
path_last1 = data_root_path + path
print(path_last3, path_last2, path_last1)
|
{"/generate_pickle.py": ["/FaceProcessUtil.py", "/facePatch.py"], "/train.py": ["/dataset.py", "/net.py", "/newmnn.py"], "/dataset.py": ["/facePatch.py"], "/productPKLforOuluCas.py": ["/FaceProcessUtil.py"], "/facePatch.py": ["/FaceProcessUtil.py"], "/productPKLforCKP.py": ["/FaceProcessUtil.py", "/facePatch.py"]}
|
19,571,748
|
fullerth/WatchTape
|
refs/heads/main
|
/source/WatchTape/tests/test_home_page.py
|
from player_list.tests.test_Video import VideoTestCase
from django.http import HttpRequest
from django.core.urlresolvers import reverse
from WatchTape.views import home
from player_list.models import Video
class VideoListTest(VideoTestCase):
    """Checks that the home page context lists created videos in creation order."""

    def test_home_page_context_contains_correct_videos(self):
        created = [self._create_video(), self._create_video()]
        response = self.client.get(reverse("home"))
        for position, ctx_video in enumerate(response.context['videos']):
            self.assertTrue(isinstance(ctx_video, Video))
            self.assertEqual(ctx_video, created[position]['instance'])
|
{"/source/functional_tests/test_jam_controls.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_django_allauth_settings.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_home_page.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_navbar.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_video_player.py": ["/source/functional_tests/base.py"]}
|
19,571,749
|
fullerth/WatchTape
|
refs/heads/main
|
/source/WatchTape/urls.py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin

# Discover admin modules in installed apps (required pre-Django 1.7).
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'WatchTape.views.home', name='home'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^accounts/', include('allauth.urls')),
    # NOTE(review): both app URLconfs are mounted on the same prefix;
    # player_list patterns shadow any identical video_player patterns.
    url(r'^watchtape/', include('player_list.urls')),
    url(r'^watchtape/', include('video_player.urls')),
    #Can use the default bootstrap template to check if bootstrap is working
    #url(r'^template/', 'WatchTape.views.template', name='template'),
)
|
{"/source/functional_tests/test_jam_controls.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_django_allauth_settings.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_home_page.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_navbar.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_video_player.py": ["/source/functional_tests/base.py"]}
|
19,571,750
|
fullerth/WatchTape
|
refs/heads/main
|
/source/video_player/tests/test_views.py
|
from django.test import TestCase, RequestFactory
from video_player.views import view_video_player
from player_list.models import Video
class VideoTaggingTests(TestCase):
    """Tests for the video player view (the actual cases are still commented out)."""

    def setUp(self):
        # RequestFactory lets tests call view functions directly,
        # bypassing the URL routing and middleware stack.
        self.requestFactory = RequestFactory()
    #def test_renders_page_without_any_video_to_jams(self):
    #    '''
    #    Create a request with data that contains no videotojam items
    #    Verify that view_video_player renders the jam timing template
    #    in the response
    #    '''
    #request = self.requestFactory.get('/video_player/video/{0}'.format(
    #    video.id))
    #response = view_video_player(request, video.id)
    #self.assertEqual(response.status_code, 200,
    #                 "Failed to load video player page")
    #return response
|
{"/source/functional_tests/test_jam_controls.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_django_allauth_settings.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_home_page.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_navbar.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_video_player.py": ["/source/functional_tests/base.py"]}
|
19,571,751
|
fullerth/WatchTape
|
refs/heads/main
|
/source/WatchTape/views.py
|
from django.shortcuts import render, get_object_or_404, get_list_or_404
from django.http import HttpResponse
from django.template import RequestContext, loader
from player_list.models import Video
from video_player.views import view_video_player
def home(request):
    """Site root: delegate straight to the video player view for video #1."""
    return view_video_player(request, video_id=1)
|
{"/source/functional_tests/test_jam_controls.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_django_allauth_settings.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_home_page.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_navbar.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_video_player.py": ["/source/functional_tests/base.py"]}
|
19,571,752
|
fullerth/WatchTape
|
refs/heads/main
|
/source/WatchTape/settings.py
|
"""
Django settings for WatchTape project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z7-+ph9akx15um21s2i@%zgrxwtd-q6w9-*u@(c$47tj--b$cr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'WatchTape/templates/WatchTape'),)
ALLOWED_HOSTS = []
#DEBUG TOOLBAR SETTINGS
# Must be a tuple of IP strings. The original `('10.0.2.2')` was a plain
# string (no trailing comma), so membership tests became substring checks
# and e.g. '0.2' would have matched.
INTERNAL_IPS = ('10.0.2.2',)
#Setup Logging
# Two file handlers: Django's own logs go to mysite.log, the video_player
# app logs go to a rotating APPNAME.log (15 MB x 10 backups).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt' : "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': 'mysite.log',
            'formatter': 'verbose'
        },
        'applogfile': {
            'level':'DEBUG',
            'class':'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'APPNAME.log'),
            'maxBytes': 1024*1024*15, # 15MB
            'backupCount': 10,
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'django': {
            'handlers':['file'],
            'propagate': True,
            'level':'DEBUG',
        },
        'video_player': {
            'handlers': ['applogfile'],
            'level': 'DEBUG',
        },
    }
}
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project apps
    'player_list',
    'video_player',
    # third-party
    'debug_toolbar',
    'rest_framework',
    #allauth
    'django.contrib.sites',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.facebook',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'WatchTape.urls'
WSGI_APPLICATION = 'WatchTape.wsgi.application'
# Authentication
TEMPLATE_CONTEXT_PROCESSORS = (
    # Required by `allauth` template tags
    'django.core.context_processors.request',
    'django.contrib.auth.context_processors.auth',
    # `allauth` specific context processors
    'allauth.account.context_processors.account',
    'allauth.socialaccount.context_processors.socialaccount',
)
AUTHENTICATION_BACKENDS = (
    # Needed to login by username in Django admin, regardless of `allauth`
    'django.contrib.auth.backends.ModelBackend',
    # `allauth` specific authentication methods, such as login by e-mail
    'allauth.account.auth_backends.AuthenticationBackend',
)
SITE_ID = 2
LOGIN_REDIRECT_URL = '/'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# SQLite database lives one level above the source tree.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, '../database/db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '../static')
STATICFILES_DIRS = (
    os.path.join(
        os.path.dirname(BASE_DIR),
        'source/WatchTape/static',
    ),
    os.path.join(
        os.path.dirname(BASE_DIR),
        'source/video_player/static',
    ),
)
|
{"/source/functional_tests/test_jam_controls.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_django_allauth_settings.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_home_page.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_navbar.py": ["/source/functional_tests/base.py"], "/source/functional_tests/test_video_player.py": ["/source/functional_tests/base.py"]}
|
19,578,581
|
1999John/Compilation
|
refs/heads/main
|
/LR/util.py
|
import re
from .product import Product
from .project import Project
from . import *
def parse_inp(inp):
    """Tokenize an input sentence and terminate it with the '$' end marker."""
    tokens = re.findall("id|[+*A-Za-z~|][']?|[()]|·", inp)
    tokens.append("$")
    return tokens
def get_action_position(id):
    # Column index of terminal `id` in the ACTION table; `action_di`
    # comes from the package __init__ via `from . import *`.
    return action_di[id]


def get_goto_position(non_ter):
    # Column index of nonterminal `non_ter` in the GOTO table; `goto_di`
    # likewise comes from the package __init__.
    return goto_di[non_ter]
def get_ori_project():
    """Read the grammar file "LR/test.txt" and return it as a Project.

    The first line holds the number of productions; each following
    line holds one production string.
    """
    # `with` guarantees the file handle is closed even if parsing fails;
    # the original code opened the file and never closed it.
    with open("LR/test.txt", "r") as f:
        num = int(f.readline())
        products = []
        for i in range(num):
            p = Product(f.readline())
            products.append(p)
    return Project(products)
def get_ret_ind(product):
    """Reduction index for *product*: "acc" for the augmented start rule,
    otherwise the negative encoding -(grammar index + 1)."""
    if product.left == "E'":
        return "acc"
    grammar = get_ori_project()
    return -(grammar.products.index(product) + 1)
|
{"/LR/Follow_First.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py", "/LL/products.py", "/LL/util.py"], "/inp.py": ["/LR/util.py", "/LR/SLR.py", "/DataStruct/MyList.py"], "/LR/util.py": ["/LR/product.py", "/LR/project.py", "/LR/__init__.py", "/LR/Ter.py"], "/LR/SLR.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py"], "/main.py": ["/LR/SLR.py", "/LR/util.py", "/LR/project.py"], "/LR/project.py": ["/LR/product.py"], "/LL/util.py": ["/LL/first_follow_class.py", "/LL/products.py"]}
|
19,578,582
|
1999John/Compilation
|
refs/heads/main
|
/LL/first_follow_class.py
|
class FF:
    """FIRST/FOLLOW record for one grammar symbol."""

    def __init__(self, source):
        self.source = source  # the nonterminal this record belongs to
        self.add = []         # nonterminals whose FIRST sets still need merging in
        self.first = []       # FIRST set (list while building, set afterwards)
        self.follow = []      # FOLLOW set

    def reset_add(self):
        """Clear the pending-merge list once FIRST propagation is done.

        Bug fix: the original assigned to a brand-new attribute ``_add``,
        leaving ``add`` untouched, so the pending list was never cleared.
        """
        self.add = []

    def __str__(self):
        fi, fol = "", ""
        for f in self.first:
            fi += f
        for fo in self.follow:
            fol += fo
        return "First({}):".format(self.source) + fi + "\nFollow({}):".format(self.source) + fol
|
{"/LR/Follow_First.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py", "/LL/products.py", "/LL/util.py"], "/inp.py": ["/LR/util.py", "/LR/SLR.py", "/DataStruct/MyList.py"], "/LR/util.py": ["/LR/product.py", "/LR/project.py", "/LR/__init__.py", "/LR/Ter.py"], "/LR/SLR.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py"], "/main.py": ["/LR/SLR.py", "/LR/util.py", "/LR/project.py"], "/LR/project.py": ["/LR/product.py"], "/LL/util.py": ["/LL/first_follow_class.py", "/LL/products.py"]}
|
19,578,583
|
1999John/Compilation
|
refs/heads/main
|
/LL/products.py
|
import re
class Product:
    """A single grammar production parsed from text like "A->(B)c"."""

    def __init__(self, pd: str):
        self._source: str
        self._right: list
        self.parse(pd)

    def parse(self, pd: str):
        """Split *pd* into the left-hand side and a tokenized right-hand side."""
        self._source = pd[:pd.find('-')]
        rhs_text = pd[pd.find('>') + 1:]
        self._right = self.parse_right_to_list(rhs_text)
        # print(self._source, self._right)

    def parse_right_to_list(self, right: str):
        """Tokenize: 'id', single letters (optionally primed), operators, parens."""
        return re.findall("id|[+*A-Za-z~|][']?|[()]", right)

    @property
    def source(self):
        return self._source

    @property
    def right(self):
        return self._right

    def __str__(self) -> str:
        return self._source + '->' + ''.join(str(tok) for tok in self._right)
if __name__=='__main__':
    # Manual smoke test: parse a production with parens, primes, '|' and '~'.
    pd = "A->(B)BC'idc|~"
    p = Product(pd)
    print(p)
|
{"/LR/Follow_First.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py", "/LL/products.py", "/LL/util.py"], "/inp.py": ["/LR/util.py", "/LR/SLR.py", "/DataStruct/MyList.py"], "/LR/util.py": ["/LR/product.py", "/LR/project.py", "/LR/__init__.py", "/LR/Ter.py"], "/LR/SLR.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py"], "/main.py": ["/LR/SLR.py", "/LR/util.py", "/LR/project.py"], "/LR/project.py": ["/LR/product.py"], "/LL/util.py": ["/LL/first_follow_class.py", "/LL/products.py"]}
|
19,578,584
|
1999John/Compilation
|
refs/heads/main
|
/LR/product.py
|
import re
class Product:
def __init__(self,product:str):
self.right:list
self.left:str
self._parse_str_to_Product(product)
def _parse_str_to_Product(self,product:str):
self.left = product[:product.index('->')]
self.right = self._parse_right_to_list(product[product.index('->')+2:])
def _parse_right_to_list(self,right:str):
return re.findall("id|[+*A-Za-z~|][']?|[()]|·",right)
def __eq__(self, o: object) -> bool:
if self.left!=o.left:
return False
if len(self.right)!=len(o.right):
return False
for l1,l2 in zip(self.right,o.right):
if l1!=l2:
return False
return True
def __str__(self) -> str:
ret = self.left+"->"
for s in self.right:
ret+=s
return ret
def copy(self):
p = Product(self.__str__()[:])
p.right = self.right[:]
return p
|
{"/LR/Follow_First.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py", "/LL/products.py", "/LL/util.py"], "/inp.py": ["/LR/util.py", "/LR/SLR.py", "/DataStruct/MyList.py"], "/LR/util.py": ["/LR/product.py", "/LR/project.py", "/LR/__init__.py", "/LR/Ter.py"], "/LR/SLR.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py"], "/main.py": ["/LR/SLR.py", "/LR/util.py", "/LR/project.py"], "/LR/project.py": ["/LR/product.py"], "/LL/util.py": ["/LL/first_follow_class.py", "/LL/products.py"]}
|
19,578,585
|
1999John/Compilation
|
refs/heads/main
|
/LR/SLR.py
|
from .product import Product
from .project import Project
from .util import *
import numpy as np
from . import *
from .Follow import getFollow
def build_DFA(queue):
    """Breadth-first construction of the LR(0) item-set DFA.

    Pops projects (item sets) off *queue* and computes all goto()
    successors; goto() registers new states in Project.projects as a
    side effect. Stops after ten consecutive rounds that discover no
    new state (a fixed-point heuristic rather than an exact check).
    """
    count = 0
    while len(queue) != 0:
        project = queue.pop(0)
        ori = len(Project.projects)  # state count before expanding this project
        for ele in project.get_ele_after_dot():
            p = project.goto(ele)
            queue.append(p)
        cur = len(Project.projects)
        if ori == cur:
            # No new state discovered this round.
            count += 1
            if count == 10:
                break
        else:
            ori = cur
# def getFollow(ter):
# return {"E'": ['$'],
# "E": ['$', '+',")",'-'],
# "T": ["$", "+", ")", "*",'-','/'],
# "F": ["$", "+", ")", "*",'-','/']}[ter]
def build_SLR(flag=None):
    """Build the SLR parsing table.

    With flag=None, return a hand-written reference table for the fixed
    expression grammar; otherwise derive ACTION/GOTO from the DFA states
    previously registered in Project.projects by build_DFA().
    Positive ACTION entries are shift targets, negative ones encode
    reductions, "acc" accepts, and np.nan marks errors.
    """
    action_di,goto_di = get_di()
    if flag is None:
        # Hand-written SLR table used as a reference/fallback.
        analysis_table = {0: {"action": [5, np.nan, np.nan, 4, np.nan, np.nan], "goto": [1, 2, 3]},
                          1: {"action": [np.nan, 6, np.nan, np.nan, np.nan, "acc"], "goto": [np.nan, np.nan, np.nan]},
                          2: {"action": [np.nan, -2, 7, np.nan, -2, -2], "goto": [np.nan, np.nan, np.nan]},
                          3: {"action": [np.nan, -4, -4, np.nan, -4, -4], "goto": [np.nan, np.nan, np.nan]},
                          4: {"action": [5, np.nan, np.nan, 4, np.nan, np.nan], "goto": [8, 2, 3]},
                          5: {"action": [np.nan, -6, -6, np.nan, -6, -6], "goto": [np.nan, np.nan, np.nan]},
                          6: {"action": [5, np.nan, np.nan, 4, np.nan, np.nan], "goto": [np.nan, 9, 3]},
                          7: {"action": [5, np.nan, np.nan, 4, np.nan, np.nan], "goto": [np.nan, np.nan, 10]},
                          8: {"action": [np.nan, 6, np.nan, np.nan, 11, np.nan], "goto": [np.nan, np.nan, np.nan]},
                          9: {"action": [np.nan, -1, 7, np.nan, -1, -1], "goto": [np.nan, np.nan, np.nan]},
                          10: {"action": [np.nan, -3, -3, np.nan, -3, -3], "goto": [np.nan, np.nan, np.nan]},
                          11: {"action": [np.nan, -5, -5, np.nan, -5, -5], "goto": [np.nan, np.nan, np.nan]}}
        return analysis_table
    else:
        analysis_table = {}
        for state, project in enumerate(Project.projects):
            # One row per DFA state, initialized to all-error (np.nan).
            analysis_table[state] = {"action": [np.nan for _ in range(len(action_di))], "goto": [np.nan for _ in range(len(goto_di))]}
            action_goto_list = project.get_ele_after_dot()
            for ter in action_goto_list:
                # Nonterminals (capital letter, optionally primed) fill GOTO;
                # terminals fill ACTION with the shift target state.
                if re.match("[A-Z][']*", ter):
                    analysis_table[state]["goto"][get_goto_position(ter)] = Project.projects.index(project.goto(ter))
                else:
                    analysis_table[state]["action"][get_action_position(ter)] = Project.projects.index(
                        project.goto(ter))
            for p in project.products:
                dot = p.right.index("·")
                if dot + 1 == len(p.right):
                    # Dot at the end: add a reduce entry for every terminal
                    # in FOLLOW(left-hand side).
                    p_ = p.copy()
                    p_.right = p_.right[:-1]
                    ind = get_ret_ind(p_)
                    for _ in getFollow(p_.left):
                        analysis_table[state]["action"][get_action_position(_)] = ind
        return analysis_table
if __name__ == '__main__':
    # Smoke test: read the grammar, build the DFA, and print both the
    # derived SLR table and the hand-written reference table.
    f = open("test.txt", 'r')
    num = int(f.readline())
    products = []
    for i in range(num):
        product = f.readline()
        product = Product(product)
        products.append(product)
    project = Project(products, True)
    build_DFA([project])
    an_1 = build_SLR(project)
    an_2 = build_SLR()
    print(an_1)
    print(an_2)
|
{"/LR/Follow_First.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py", "/LL/products.py", "/LL/util.py"], "/inp.py": ["/LR/util.py", "/LR/SLR.py", "/DataStruct/MyList.py"], "/LR/util.py": ["/LR/product.py", "/LR/project.py", "/LR/__init__.py", "/LR/Ter.py"], "/LR/SLR.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py"], "/main.py": ["/LR/SLR.py", "/LR/util.py", "/LR/project.py"], "/LR/project.py": ["/LR/product.py"], "/LL/util.py": ["/LL/first_follow_class.py", "/LL/products.py"]}
|
19,578,586
|
1999John/Compilation
|
refs/heads/main
|
/main.py
|
from LR.SLR import build_SLR,build_DFA
from LR.util import parse_inp, get_action_position, get_goto_position, get_ori_project
from LR.project import Project
import numpy as np
if __name__ == '__main__':
    # SLR(1) driver loop: shift/reduce over the ACTION/GOTO table until
    # accept ("acc") or error (np.nan entry).
    stack = [0]
    inp = input("input language:")
    inp = parse_inp(inp)
    ori_project = get_ori_project()
    build_project = Project(ori_project.products,True)
    build_DFA([build_project])
    analysis_table = build_SLR(True)
    # an_1 = build_SLR(ori_project)
    # an_2 = build_SLR()
    # print(an_1)
    # print(an_2)
    # exit(0)
    while True:
        cur_state = stack[-1]
        ter = inp[0]
        ind = get_action_position(ter)
        cont = analysis_table[cur_state]["action"][ind]
        if cont is np.nan:
            # Empty ACTION cell: syntax error.
            print("error")
            exit(1)
        elif cont == "acc":
            print("0 error,0 warning")
            exit(0)
        elif cont >= 0:
            # Shift: push the terminal and the new state. ("移进" = shift)
            print("移进")
            inp = inp[1:]
            stack.append(ter)
            stack.append(cont)
            ter = inp[0]
            cur_state = stack[-1]
        elif cont < 0:
            # Reduce: negative entries encode -(production index + 1).
            cont = 0 - cont-1
            p = ori_project.products[cont]
            print("按照" + str(p) + "规约")
            # Pop one (symbol, state) pair per right-hand-side token.
            for _ in range((len(p.right)-1) * 2):
                stack.pop()
            cur_state = stack[-1]
            stack.append(p.left)
            goto_ind = get_goto_position(p.left)
            goto_state = analysis_table[cur_state]["goto"][goto_ind]
            if goto_state is np.nan:
                print("error")
                exit(1)
            else:
                stack.append(goto_state)
                cur_state = goto_state
|
{"/LR/Follow_First.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py", "/LL/products.py", "/LL/util.py"], "/inp.py": ["/LR/util.py", "/LR/SLR.py", "/DataStruct/MyList.py"], "/LR/util.py": ["/LR/product.py", "/LR/project.py", "/LR/__init__.py", "/LR/Ter.py"], "/LR/SLR.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py"], "/main.py": ["/LR/SLR.py", "/LR/util.py", "/LR/project.py"], "/LR/project.py": ["/LR/product.py"], "/LL/util.py": ["/LL/first_follow_class.py", "/LL/products.py"]}
|
19,578,587
|
1999John/Compilation
|
refs/heads/main
|
/LR/project.py
|
from .product import Product
import re
def remove_same(products):
    """Return *products* with duplicates (by ==) removed, keeping first occurrences."""
    unique = []
    for candidate in products:
        if candidate not in unique:
            unique.append(candidate)
    return unique
class Project:
    """An LR(0) item set (DFA state).

    The class attribute `projects` is the global registry of all states;
    index 0 is the initial state holding the full (augmented) grammar.
    """
    projects = []

    def __init__(self,products:list,initial=None):
        # Deduplicated item list for this state.
        self.products = remove_same(products)
        if initial!=None:
            # Augment the grammar with E'->E and put the dot marker '·'
            # at the start of every production's right-hand side.
            p = Product("E'->E")
            self.products.insert(0,p)
            for i in range(len(self.products)):
                self.products[i].right.insert(0,'·')
            Project.projects.append(self)
        # Successor states reachable from this one (filled by goto()).
        self.points = []

    def goto(self,inp:str):
        """Return the state reached by consuming symbol *inp*.

        Advances the dot past *inp* in every applicable item, closes the
        set over leading nonterminals, and registers the resulting state
        in Project.projects if it is new.
        """
        ret_project = []
        for pro in self.products:
            p = pro.copy()
            dot = p.right.index('·')
            if dot+1 != len(p.right):
                if p.right[dot+1]==inp: # todo: check whether dot can go out of range
                    inp_index = dot+1
                    # Advance the dot by swapping it with the next symbol.
                    p.right[dot],p.right[inp_index] = p.right[inp_index],p.right[dot]
                    ret_project.append(p)
                    ret_project += self._add_non_ter_product(p)
        rp = Project(ret_project)
        if rp not in Project.projects:
            Project.projects.append(rp)
        # Always link/return the canonical registered instance.
        self.points.append(Project.projects[Project.projects.index(rp)])
        return Project.projects[Project.projects.index(rp)]

    def _add_non_ter_product(self,product):
        """Closure step: items for nonterminals right after *product*'s dot.

        Pulls fresh (dot-at-start) productions from the initial grammar
        state and repeats enough passes to chase nonterminal chains.
        """
        dot = product.right.index("·")
        ret = []
        if dot!=len(product.right)-1:
            if re.match("[A-Z][']*",product.right[dot+1]):
                for p in Project.projects[0].products:
                    if p.left==product.right[dot+1]:
                        ret.append(p)
                for i in range(len(Project.projects[0].products)):
                    for p in ret[:]:
                        dot_ = p.right.index("·")
                        if re.match("[A-Z][']*",p.right[dot_+1]):
                            for p_ in Project.projects[0].products:
                                if p_.left==p.right[dot_+1] and p_ not in ret:
                                    ret.append(p_)
        return ret

    def get_ele_after_dot(self):
        """All distinct grammar symbols appearing immediately after a dot."""
        ret = []
        for p in self.products:
            dot = p.right.index("·")
            if len(p.right)!=dot+1:
                if p.right[dot+1] not in ret:
                    ret.append(p.right[dot+1])
        return ret

    def __eq__(self, o: object) -> bool:
        # Two states are equal when they contain the same item sets
        # (order-insensitive).
        if len(self.products)!=len(o.products):
            return False
        for p in self.products:
            if p not in o.products:
                return False
        return True

    def __str__(self) -> str:
        ret = ""
        for p in self.products:
            ret+=str(p)+"\n"
        return ret
|
{"/LR/Follow_First.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py", "/LL/products.py", "/LL/util.py"], "/inp.py": ["/LR/util.py", "/LR/SLR.py", "/DataStruct/MyList.py"], "/LR/util.py": ["/LR/product.py", "/LR/project.py", "/LR/__init__.py", "/LR/Ter.py"], "/LR/SLR.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py"], "/main.py": ["/LR/SLR.py", "/LR/util.py", "/LR/project.py"], "/LR/project.py": ["/LR/product.py"], "/LL/util.py": ["/LL/first_follow_class.py", "/LL/products.py"]}
|
19,578,588
|
1999John/Compilation
|
refs/heads/main
|
/LL/util.py
|
"""
E->TE'
E'->+TE'|~
T->FT'
T'->*FT'|~
F->(E)|id
"""
from .first_follow_class import FF
from .products import Product
import re
# Module-level grammar state shared by the functions below.
sources = {}   # nonterminal -> FF record holding its FIRST/FOLLOW sets
products = []  # parsed Product objects, filled by parse_string_to_pds()
# Column (terminal) and row (nonterminal) indices into the LL(1) table.
ter = {'id':0,'+':1,'*':2,'(':3,')':4,'$':5}
non_ter = {'E':0,"E'":1,"T":2,"T'":3,"F":4}
def getFirst():
    """Populate the FIRST sets in `sources` from the global `products`.

    Leading terminals enter FIRST directly; leading nonterminals are
    queued on FF.add and merged in a fixed number of propagation passes.
    """
    for i in range(len(products)):
        source = products[i].source
        if products[i].source not in sources.keys():
            sources[source] = FF(source)
        # Leading right-hand terminal: add it to FIRST immediately.
        if re.match('id|[+*a-z~()]', products[i].right[0]):
            sources[source].first.append(products[i].right[0])
        # Leading nonterminal: queue it for the propagation passes below.
        elif re.match('[A-Z]', products[i].right[0]):
            sources[source].add.append(products[i].right[0])
        # Repeat the same two steps for the alternative after '|', if any.
        try:
            if re.match('id|[a-z+*~()]', products[i].right[products[i].right.index('|') + 1]):
                sources[source].first.append(products[i].right[products[i].right.index('|') + 1])
            elif re.match('[A-Z]', products[i].right[products[i].right.index('|') + 1]):
                sources[source].add.append(products[i].right[products[i].right.index('|') + 1])
        except (ValueError, IndexError):
            # ValueError: no '|' in this production; IndexError: '|' is the
            # last token. The original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, which was never intended.
            continue
    # Propagate queued FIRST sets; len(sources) passes suffice for this
    # grammar's nonterminal dependency depth.
    for i in range(len(sources)):
        for s in sources.keys():
            if len(sources[s].add) > 0:
                for s_add in sources[s].add:
                    sources[s].first += sources[s_add].first
    for s in sources.keys():
        sources[s].first = set(sources[s].first)
        sources[s].reset_add()
"""
E->TE'
E'->+TE'|~
T->FT'
T'->*FT'|~
F->(E)|id
"""
def getFollow():
    """Assign FOLLOW sets, hard-coded for this fixed expression grammar."""
    # todo: compute these from the grammar instead of hard-coding
    follow_table = {
        "E": ['$', ')'],
        "E'": ['$', ')'],
        "T": ['+', '$', ')'],
        "T'": ['+', '$', ')'],
        "F": ['*', '+', '$', ')'],
    }
    for symbol, terminals in follow_table.items():
        sources[symbol].follow = terminals
# Predictive parsing table; populated via get_analysis_table() in __main__.
analysis_table = []
def get_analysis_table():
    """Build the LL(1) analysis table for the fixed expression grammar.

    Rows are indexed by `non_ter` (E, E', T, T', F) and columns by `ter`
    (id, +, *, (, ), $).  A cell holds the production right-hand side to
    push ('~' marks epsilon); 0 marks an error entry.

    Fix: the original mixed positive and negative column indices
    (ret[2][-3], ret[3][-1]); all indices are now positive for clarity.

    :return: 5x6 list-of-lists parsing table
    """
    table = [[0] * 6 for _ in range(5)]
    # E  -> T E'        on id, (
    table[0][0] = ['T', "E'"]
    table[0][3] = ['T', "E'"]
    # E' -> + T E'      on +;   E' -> ~ on ), $
    table[1][1] = ['+', 'T', "E'"]
    table[1][4] = ['~']
    table[1][5] = ['~']
    # T  -> F T'        on id, (
    table[2][0] = ['F', "T'"]
    table[2][3] = ['F', "T'"]
    # T' -> ~ on +, ), $;   T' -> * F T' on *
    table[3][1] = ['~']
    table[3][2] = ['*', 'F', "T'"]
    table[3][4] = ['~']
    table[3][5] = ['~']
    # F  -> id on id;   F -> ( E ) on (
    table[4][0] = ['id']
    table[4][3] = ['(', 'E', ')']
    return table
def parse_string_to_pds(s) -> list:
    """Parse production strings (e.g. "E->TE'") into Product objects.

    Each parsed Product is appended to the module-level `products` list.

    Fix: the function was annotated `-> list` but returned None; it now
    returns the `products` list (backward-compatible — existing callers
    that ignored the return value are unaffected).

    :param s: iterable of production strings
    :return: the module-level `products` list
    """
    for line in s:
        products.append(Product(line))
    return products
def parse_inp(inp) -> list:
    """Tokenise an input expression into 'id', single letters, and the
    operator/bracket symbols; everything else is discarded."""
    token_pattern = re.compile(r"id|[()a-z*+]")
    return token_pattern.findall(inp)
if __name__ == '__main__':
    # Table-driven LL(1) parsing of a user-supplied expression.
    # FIRST/FOLLOW computation for the fixed grammar is pre-baked into the
    # table, so the generic machinery below is left disabled:
    # s = ["E->TE'",
    #      "E'->+TE'|~",
    #      "T->FT'",
    #      "T'->*FT'|~",
    #      "F->(E)|id"]
    # parse_string_to_pds(s)
    # getFirst()
    # getFollow()
    analysis_table = get_analysis_table()
    # Parse stack: bottom marker '$' with the start symbol E on top.
    st = ['$','E']
    # Tokenise the input expression and terminate it with '$'.
    inp = parse_inp(input())
    inp.append("$")
    ip = 0          # index of the current lookahead token
    X = st[-1]      # current top-of-stack symbol
    while X!='$':
        if X==inp[ip]:
            # Top of stack matches the lookahead: consume both.
            st.pop(-1)
            ip+=1
        elif re.match("id|[a-z+*()~]",X):
            # A terminal on the stack that does NOT match the lookahead:
            # report the error ("pop the symbol" in Chinese).
            # NOTE(review): despite the message, nothing is actually popped
            # and `ip` does not advance, so unless X == ')' this loops
            # forever — confirm whether a st.pop(-1) is missing here.
            print("弹栈,弹出非终结符{}".format(X))
            if X == ')':
                # Unmatched closing parenthesis: abort.
                print("括号不匹配")
                exit(0)
        elif analysis_table[non_ter[X]][ter[inp[ip]]]==0:
            # Empty table cell: the lookahead cannot legally follow here
            # ("extra token in the input" in Chinese).
            print("输入串跳过记号{},用户多输入了一个{}".format(inp[ip],inp[ip]))
            exit(0)
        elif analysis_table[non_ter[X]][ter[inp[ip]]]!=0:
            # Expand the non-terminal: pop it, then push the production's
            # right-hand side in reverse order ('~' = epsilon, push nothing).
            print(X,'->',analysis_table[non_ter[X]][ter[inp[ip]]])
            st.pop(-1)
            for f in range(len(analysis_table[non_ter[X]][ter[inp[ip]]])-1,-1,-1):
                if analysis_table[non_ter[X]][ter[inp[ip]]][f]=='~':
                    continue
                st.append(analysis_table[non_ter[X]][ter[inp[ip]]][f])
        X = st[-1]
    print("0 error 0 warning")
|
{"/LR/Follow_First.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py", "/LL/products.py", "/LL/util.py"], "/inp.py": ["/LR/util.py", "/LR/SLR.py", "/DataStruct/MyList.py"], "/LR/util.py": ["/LR/product.py", "/LR/project.py", "/LR/__init__.py", "/LR/Ter.py"], "/LR/SLR.py": ["/LR/product.py", "/LR/project.py", "/LR/util.py", "/LR/__init__.py"], "/main.py": ["/LR/SLR.py", "/LR/util.py", "/LR/project.py"], "/LR/project.py": ["/LR/product.py"], "/LL/util.py": ["/LL/first_follow_class.py", "/LL/products.py"]}
|
19,676,875
|
AltmanD/rl-framework
|
refs/heads/main
|
/core/agent.py
|
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Type, Union
import numpy as np
from .model import Model
from .utils import get_config_params
class Agent(ABC):
    """Abstract base class for RL agents.

    Owns one or more Model instances, handles configuration import/export,
    and provides greedy (`policy`) and stochastic (`sample`) action
    selection.  Subclasses implement preprocess/save/load/learn.
    """

    def __init__(self, model_cls: Type[Model], observation_space: Any, action_space: Any, config: dict = None,
                 *args, **kwargs) -> None:
        """
        1. Set configuration parameters (which appear after 'config') for agent and configurations for model
           if specified in 'config'
        2. Initialize model instances
        3. Build training part of computational graph
        :param model_cls: Model class that agent adopts
        :param observation_space: Env observation space
        :param action_space: Env action space
        :param config: Configurations for agent and models
        :param args: Positional configurations for agent only (ignored if specified in 'config')
        :param kwargs: Keyword configurations for agent only (ignored if specified in 'config')
        """
        self.model_cls = model_cls
        self.observation_space = observation_space
        self.action_space = action_space
        # Initialize instances of 'model_cls'
        self.model_instances = None
        if config is not None:
            # load_config() applies declared params, then calls
            # _init_model_instances() itself.
            self.load_config(config)
        else:
            self._init_model_instances(config)

    @abstractmethod
    def preprocess(self, state: Any, *args, **kwargs) -> Any:
        """Preprocess the game state"""
        pass

    @abstractmethod
    def save(self, path: Path, *args, **kwargs) -> None:
        """Save the checkpoint file"""
        pass

    @abstractmethod
    def load(self, path: Path, *args, **kwargs) -> None:
        """Load the checkpoint file"""
        pass

    @abstractmethod
    def learn(self, *args, **kwargs) -> None:
        """Train the agent"""
        pass

    def export_config(self) -> dict:
        """Export dictionary as configurations"""
        # Agent-level configurable params: everything declared after
        # 'config' in Agent.__init__.
        param_dict = {p: getattr(self, p) for p in get_config_params(Agent.__init__)}
        model_config = None
        # A single model exports a dict; multiple models export a list
        # (mirrors the two shapes accepted by _init_model_instances).
        if len(self.model_instances) == 1:
            model_config = self.model_instances[0].export_config()
        else:
            model_config = [x.export_config() for x in self.model_instances]
        param_dict.update({'model': model_config})
        return param_dict

    def load_config(self, config: dict) -> None:
        """Load dictionary as configurations and initialize model instances"""
        # Apply only keys that are declared configurable; unknown keys are
        # silently ignored.
        for key, val in config.items():
            if key in get_config_params(Agent.__init__):
                self.__dict__[key] = val
        self._init_model_instances(config)

    def predict(self, state: Any, *args, **kwargs) -> Any:
        """Get the action distribution at specific state"""
        # Uses the first model instance by convention.
        return self.model_instances[0].forward(state, *args, **kwargs)

    def policy(self, state: Any, *args, **kwargs) -> Any:
        """Choose action during exploitation"""
        # Greedy: argmax over the first element of the batched prediction.
        return np.argmax(self.predict(state, *args, **kwargs)[0])

    def sample(self, state: Any, *args, **kwargs) -> Any:
        """Choose action during exploration/sampling"""
        # Samples an action index from the predicted distribution.
        # NOTE(review): assumes predict() returns normalized probabilities
        # summing to 1 -- confirm for each concrete model.
        p = self.predict(state, *args, **kwargs)[0]
        return np.random.choice(len(p), p=p)

    def _init_model_instances(self, config: Union[dict, None]) -> None:
        """Initialize model instances"""
        self.model_instances = []
        if config is not None and 'model' in config:
            model_config = config['model']
            # A list of configs creates one instance per entry ...
            if isinstance(model_config, list):
                for i, c in enumerate(model_config):
                    self.model_instances.append(self.model_cls(self.observation_space, self.action_space, **c))
            # ... a single dict creates exactly one instance.
            elif isinstance(model_config, dict):
                self.model_instances.append(
                    self.model_cls(self.observation_space, self.action_space, **model_config))
        else:
            # No model config at all: one default-configured instance.
            self.model_instances.append(self.model_cls(self.observation_space, self.action_space))
|
{"/learner/agents/ppo/__init__.py": ["/learner/agents/ppo/ppo_agent.py", "/learner/agents/ppo/ppo_agent_keras.py"], "/learner/core/agent.py": ["/learner/core/model.py", "/learner/core/utils.py"], "/learner/agents/ppo/ppo_agent_keras.py": ["/core/__init__.py"], "/learner/models/tf_keras_model.py": ["/core/__init__.py"], "/actor/core/__init__.py": ["/actor/core/model.py"], "/learner/agents/dqn/dqn_agent_keras.py": ["/core/__init__.py"], "/learner/core/utils.py": ["/core/__init__.py"], "/learner/common.py": ["/core/__init__.py"], "/learner/core/__init__.py": ["/learner/core/agent.py", "/learner/core/env.py", "/learner/core/mem_pool.py", "/learner/core/model.py", "/learner/core/registry.py"], "/learner/core/model.py": ["/learner/core/utils.py"], "/learner/agents/dqn/__init__.py": ["/learner/agents/dqn/dqn_agent.py", "/learner/agents/dqn/dqn_agent_keras.py"], "/learner/agents/ppo/ppo_agent.py": ["/core/__init__.py"], "/actor/models/tf_v1_model.py": ["/core/__init__.py"], "/actor/envs/__init__.py": ["/actor/envs/atari/__init__.py", "/actor/envs/vec_env/__init__.py"], "/learner/agents/dqn/dqn_agent.py": ["/core/__init__.py"], "/core/agent.py": ["/core/model.py", "/core/utils.py"], "/core/model.py": ["/core/utils.py"], "/actor.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"], "/core/utils.py": ["/core/__init__.py"], "/core/__init__.py": ["/core/agent.py", "/core/model.py"], "/dqn/dqn_agent.py": ["/core/__init__.py", "/dqn/cnn_model.py"], "/dqn/cnn_model.py": ["/core/__init__.py"], "/learner.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"]}
|
19,676,876
|
AltmanD/rl-framework
|
refs/heads/main
|
/core/model.py
|
from abc import ABC, abstractmethod
from typing import Any
import tensorflow as tf
from .utils import get_config_params
class Model(ABC):
    """Abstract base class for function-approximation models.

    Handles configuration import/export; concrete subclasses define the
    computational graph in build() and implement weight access and the
    forward pass.
    """

    def __init__(self, observation_space: Any, action_space: Any, model_id: str = '0', config: dict = None,
                 *args, **kwargs) -> None:
        """
        1. Set configuration parameters (which appear after 'config')
        2. Define layers and tensors
        3. Build model
        :param model_id: The identifier of the model
        :param config: Configurations of hyper-parameters
        :param args: Positional configurations (ignored if specified in 'config')
        :param kwargs: Keyword configurations (ignored if specified in 'config')
        """
        self.observation_space = observation_space
        self.action_space = action_space
        self.model_id = model_id
        self.config = config
        if config is not None:
            # load_config() applies declared params and then calls build().
            self.load_config(config)
        else:
            self.build()

    @abstractmethod
    def build(self, *args, **kwargs) -> None:
        """Build the computational graph"""
        pass

    @abstractmethod
    def set_weights(self, *args, **kwargs) -> None:
        """Overwrite the model's weights."""
        pass

    @abstractmethod
    def get_weights(self, *args, **kwargs) -> Any:
        """Return the model's current weights."""
        pass

    @abstractmethod
    def forward(self, states: Any, *args, **kwargs) -> Any:
        """Run a forward pass on a batch of states."""
        pass

    def export_config(self) -> dict:
        """Export dictionary as configurations"""
        # Model-level configurable params: everything declared after
        # 'config' in Model.__init__.
        config_params = get_config_params(Model.__init__)
        return {p: getattr(self, p) for p in config_params}

    def load_config(self, config: dict) -> None:
        """Load dictionary as configurations and build model"""
        # Apply only declared configurable keys, then (re)build the graph.
        for key, val in config.items():
            if key in get_config_params(Model.__init__):
                self.__dict__[key] = val
        self.build()

    def __call__(self, *args, **kwargs) -> Any:
        # NOTE(review): two suspicious points to confirm --
        # (1) `with` on the default session closes it on exit, so repeated
        #     calls would need a fresh session each time;
        # (2) `sess` is passed as the first positional argument where
        #     subclasses declare `states` (cf. forward(self, states, ...)).
        # Presumably only TF1-style models that take a session first use
        # this path; verify against actual callers.
        with tf.get_default_session() as sess:
            return self.forward(sess, *args, **kwargs)
|
{"/learner/agents/ppo/__init__.py": ["/learner/agents/ppo/ppo_agent.py", "/learner/agents/ppo/ppo_agent_keras.py"], "/learner/core/agent.py": ["/learner/core/model.py", "/learner/core/utils.py"], "/learner/agents/ppo/ppo_agent_keras.py": ["/core/__init__.py"], "/learner/models/tf_keras_model.py": ["/core/__init__.py"], "/actor/core/__init__.py": ["/actor/core/model.py"], "/learner/agents/dqn/dqn_agent_keras.py": ["/core/__init__.py"], "/learner/core/utils.py": ["/core/__init__.py"], "/learner/common.py": ["/core/__init__.py"], "/learner/core/__init__.py": ["/learner/core/agent.py", "/learner/core/env.py", "/learner/core/mem_pool.py", "/learner/core/model.py", "/learner/core/registry.py"], "/learner/core/model.py": ["/learner/core/utils.py"], "/learner/agents/dqn/__init__.py": ["/learner/agents/dqn/dqn_agent.py", "/learner/agents/dqn/dqn_agent_keras.py"], "/learner/agents/ppo/ppo_agent.py": ["/core/__init__.py"], "/actor/models/tf_v1_model.py": ["/core/__init__.py"], "/actor/envs/__init__.py": ["/actor/envs/atari/__init__.py", "/actor/envs/vec_env/__init__.py"], "/learner/agents/dqn/dqn_agent.py": ["/core/__init__.py"], "/core/agent.py": ["/core/model.py", "/core/utils.py"], "/core/model.py": ["/core/utils.py"], "/actor.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"], "/core/utils.py": ["/core/__init__.py"], "/core/__init__.py": ["/core/agent.py", "/core/model.py"], "/dqn/dqn_agent.py": ["/core/__init__.py", "/dqn/cnn_model.py"], "/dqn/cnn_model.py": ["/core/__init__.py"], "/learner.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"]}
|
19,676,877
|
AltmanD/rl-framework
|
refs/heads/main
|
/actor.py
|
import numpy as np
import zmq
from dqn.atari import AtariEnv
from dqn.cnn_model import CNNModel
from dqn.dqn_agent import DQNAgent
from dqn.protobuf.data import Data, arr2bytes
def main():
    """Actor loop: play the Atari env and stream transitions to the learner.

    Binds a ZMQ REP socket; every cycle receives a (possibly empty) weight
    payload from the learner and replies with one serialized transition.
    """
    ctx = zmq.Context()
    reply_socket = ctx.socket(zmq.REP)
    reply_socket.bind("tcp://127.0.0.1:5000")

    env = AtariEnv('PongNoFrameskip-v4', 4)
    total_steps = 1000000
    agent = DQNAgent(
        CNNModel,
        env.get_observation_space(),
        env.get_action_space()
    )

    rewards_per_episode = [0.0]
    obs = env.reset()
    for step in range(total_steps):
        # An empty payload means "no new weights this round".
        weights = reply_socket.recv()
        if len(weights):
            agent.set_weights(weights)

        # Anneal exploration, then act epsilon-greedily.
        agent.adjust_epsilon(step, total_steps)
        action = agent.sample(obs)
        next_obs, reward, done, info = env.step(action)

        # Ship the transition back as the REP reply.
        transition = Data(
            state=arr2bytes(obs),
            action=int(action),
            reward=reward,
            next_state=arr2bytes(next_obs),
            done=done, epoch=step
        )
        reply_socket.send(transition.SerializeToString())

        obs = next_obs
        rewards_per_episode[-1] += reward
        if done:
            episode_count = len(rewards_per_episode)
            mean_100ep_reward = round(np.mean(rewards_per_episode[-100:]), 2)
            print(f'Episode: {episode_count}, Step: {step + 1}/{total_steps}, Mean Reward: {mean_100ep_reward}, '
                  f'Epsilon: {agent.epsilon:.3f}')
            obs = env.reset()
            rewards_per_episode.append(0.0)
if __name__ == '__main__':
    main()
|
{"/learner/agents/ppo/__init__.py": ["/learner/agents/ppo/ppo_agent.py", "/learner/agents/ppo/ppo_agent_keras.py"], "/learner/core/agent.py": ["/learner/core/model.py", "/learner/core/utils.py"], "/learner/agents/ppo/ppo_agent_keras.py": ["/core/__init__.py"], "/learner/models/tf_keras_model.py": ["/core/__init__.py"], "/actor/core/__init__.py": ["/actor/core/model.py"], "/learner/agents/dqn/dqn_agent_keras.py": ["/core/__init__.py"], "/learner/core/utils.py": ["/core/__init__.py"], "/learner/common.py": ["/core/__init__.py"], "/learner/core/__init__.py": ["/learner/core/agent.py", "/learner/core/env.py", "/learner/core/mem_pool.py", "/learner/core/model.py", "/learner/core/registry.py"], "/learner/core/model.py": ["/learner/core/utils.py"], "/learner/agents/dqn/__init__.py": ["/learner/agents/dqn/dqn_agent.py", "/learner/agents/dqn/dqn_agent_keras.py"], "/learner/agents/ppo/ppo_agent.py": ["/core/__init__.py"], "/actor/models/tf_v1_model.py": ["/core/__init__.py"], "/actor/envs/__init__.py": ["/actor/envs/atari/__init__.py", "/actor/envs/vec_env/__init__.py"], "/learner/agents/dqn/dqn_agent.py": ["/core/__init__.py"], "/core/agent.py": ["/core/model.py", "/core/utils.py"], "/core/model.py": ["/core/utils.py"], "/actor.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"], "/core/utils.py": ["/core/__init__.py"], "/core/__init__.py": ["/core/agent.py", "/core/model.py"], "/dqn/dqn_agent.py": ["/core/__init__.py", "/dqn/cnn_model.py"], "/dqn/cnn_model.py": ["/core/__init__.py"], "/learner.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"]}
|
19,676,878
|
AltmanD/rl-framework
|
refs/heads/main
|
/core/utils.py
|
import inspect
from typing import Callable, List
import core
def get_config_params(init_func: Callable) -> List[str]:
    """
    Return configurable parameters in 'Agent.__init__' and 'Model.__init__' which appear after 'config'
    :param init_func: 'Agent.__init__' or 'Model.__init__'
    :return: A list of configurable parameters
    """
    if init_func is not core.Agent.__init__ and init_func is not core.Model.__init__:
        raise ValueError("Only accepts 'Agent.__init__' or 'Model.__init__'")
    params = list(inspect.signature(init_func).parameters)
    try:
        config_pos = params.index('config')
    except ValueError:
        # No 'config' parameter at all: nothing is configurable.
        return []
    # Everything declared after 'config' is a configurable parameter.
    return params[config_pos + 1:]
#
# def set_config_params(obj: object, config_params: List[str], config: dict, default_values: dict) -> dict:
# """
# Set configurations in following order
# 1. Configurations in 'config'
# 2. Configurations set in 'config_params' argument
# 3. Default configurations
# :param obj: 'Agent' or 'Model' instance
# :param config_params: A list of configurable parameters
# :param config: Configuration file
# :param default_values: Default values for 'config_params'
# :return: Configured parameters
# """
# param_dict = {p: None for p in config_params}
#
# for p in param_dict.keys():
# if config is not None and p in config:
# param_dict[p] = config[p]
# else:
# param_dict[p] = default_values[p]
#
#
# for p, val in param_dict.items():
# if val is None:
# param_dict[p] = default_values[p]
#
# return param_dict
|
{"/learner/agents/ppo/__init__.py": ["/learner/agents/ppo/ppo_agent.py", "/learner/agents/ppo/ppo_agent_keras.py"], "/learner/core/agent.py": ["/learner/core/model.py", "/learner/core/utils.py"], "/learner/agents/ppo/ppo_agent_keras.py": ["/core/__init__.py"], "/learner/models/tf_keras_model.py": ["/core/__init__.py"], "/actor/core/__init__.py": ["/actor/core/model.py"], "/learner/agents/dqn/dqn_agent_keras.py": ["/core/__init__.py"], "/learner/core/utils.py": ["/core/__init__.py"], "/learner/common.py": ["/core/__init__.py"], "/learner/core/__init__.py": ["/learner/core/agent.py", "/learner/core/env.py", "/learner/core/mem_pool.py", "/learner/core/model.py", "/learner/core/registry.py"], "/learner/core/model.py": ["/learner/core/utils.py"], "/learner/agents/dqn/__init__.py": ["/learner/agents/dqn/dqn_agent.py", "/learner/agents/dqn/dqn_agent_keras.py"], "/learner/agents/ppo/ppo_agent.py": ["/core/__init__.py"], "/actor/models/tf_v1_model.py": ["/core/__init__.py"], "/actor/envs/__init__.py": ["/actor/envs/atari/__init__.py", "/actor/envs/vec_env/__init__.py"], "/learner/agents/dqn/dqn_agent.py": ["/core/__init__.py"], "/core/agent.py": ["/core/model.py", "/core/utils.py"], "/core/model.py": ["/core/utils.py"], "/actor.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"], "/core/utils.py": ["/core/__init__.py"], "/core/__init__.py": ["/core/agent.py", "/core/model.py"], "/dqn/dqn_agent.py": ["/core/__init__.py", "/dqn/cnn_model.py"], "/dqn/cnn_model.py": ["/core/__init__.py"], "/learner.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"]}
|
19,676,879
|
AltmanD/rl-framework
|
refs/heads/main
|
/dqn/atari/__init__.py
|
from .env import AtariEnv
|
{"/learner/agents/ppo/__init__.py": ["/learner/agents/ppo/ppo_agent.py", "/learner/agents/ppo/ppo_agent_keras.py"], "/learner/core/agent.py": ["/learner/core/model.py", "/learner/core/utils.py"], "/learner/agents/ppo/ppo_agent_keras.py": ["/core/__init__.py"], "/learner/models/tf_keras_model.py": ["/core/__init__.py"], "/actor/core/__init__.py": ["/actor/core/model.py"], "/learner/agents/dqn/dqn_agent_keras.py": ["/core/__init__.py"], "/learner/core/utils.py": ["/core/__init__.py"], "/learner/common.py": ["/core/__init__.py"], "/learner/core/__init__.py": ["/learner/core/agent.py", "/learner/core/env.py", "/learner/core/mem_pool.py", "/learner/core/model.py", "/learner/core/registry.py"], "/learner/core/model.py": ["/learner/core/utils.py"], "/learner/agents/dqn/__init__.py": ["/learner/agents/dqn/dqn_agent.py", "/learner/agents/dqn/dqn_agent_keras.py"], "/learner/agents/ppo/ppo_agent.py": ["/core/__init__.py"], "/actor/models/tf_v1_model.py": ["/core/__init__.py"], "/actor/envs/__init__.py": ["/actor/envs/atari/__init__.py", "/actor/envs/vec_env/__init__.py"], "/learner/agents/dqn/dqn_agent.py": ["/core/__init__.py"], "/core/agent.py": ["/core/model.py", "/core/utils.py"], "/core/model.py": ["/core/utils.py"], "/actor.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"], "/core/utils.py": ["/core/__init__.py"], "/core/__init__.py": ["/core/agent.py", "/core/model.py"], "/dqn/dqn_agent.py": ["/core/__init__.py", "/dqn/cnn_model.py"], "/dqn/cnn_model.py": ["/core/__init__.py"], "/learner.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"]}
|
19,676,880
|
AltmanD/rl-framework
|
refs/heads/main
|
/core/__init__.py
|
from .agent import Agent
from .env import Env
from .model import Model
|
{"/learner/agents/ppo/__init__.py": ["/learner/agents/ppo/ppo_agent.py", "/learner/agents/ppo/ppo_agent_keras.py"], "/learner/core/agent.py": ["/learner/core/model.py", "/learner/core/utils.py"], "/learner/agents/ppo/ppo_agent_keras.py": ["/core/__init__.py"], "/learner/models/tf_keras_model.py": ["/core/__init__.py"], "/actor/core/__init__.py": ["/actor/core/model.py"], "/learner/agents/dqn/dqn_agent_keras.py": ["/core/__init__.py"], "/learner/core/utils.py": ["/core/__init__.py"], "/learner/common.py": ["/core/__init__.py"], "/learner/core/__init__.py": ["/learner/core/agent.py", "/learner/core/env.py", "/learner/core/mem_pool.py", "/learner/core/model.py", "/learner/core/registry.py"], "/learner/core/model.py": ["/learner/core/utils.py"], "/learner/agents/dqn/__init__.py": ["/learner/agents/dqn/dqn_agent.py", "/learner/agents/dqn/dqn_agent_keras.py"], "/learner/agents/ppo/ppo_agent.py": ["/core/__init__.py"], "/actor/models/tf_v1_model.py": ["/core/__init__.py"], "/actor/envs/__init__.py": ["/actor/envs/atari/__init__.py", "/actor/envs/vec_env/__init__.py"], "/learner/agents/dqn/dqn_agent.py": ["/core/__init__.py"], "/core/agent.py": ["/core/model.py", "/core/utils.py"], "/core/model.py": ["/core/utils.py"], "/actor.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"], "/core/utils.py": ["/core/__init__.py"], "/core/__init__.py": ["/core/agent.py", "/core/model.py"], "/dqn/dqn_agent.py": ["/core/__init__.py", "/dqn/cnn_model.py"], "/dqn/cnn_model.py": ["/core/__init__.py"], "/learner.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"]}
|
19,676,881
|
AltmanD/rl-framework
|
refs/heads/main
|
/dqn/dqn_agent.py
|
from typing import Any
import numpy as np
from tensorflow.keras.optimizers import RMSprop
from core import Agent
from .cnn_model import CNNModel
from .replay_buffer import ReplayBuffer
class DQNAgent(Agent):
    """Double-DQN agent with a trainable policy network and a periodically
    synced target network."""

    def __init__(self, model_cls, observation_space, action_space, config=None, batch_size=32, epsilon=1,
                 epsilon_min=0.01, gamma=0.99, buffer_size=5000, update_freq=1000, training_start=10000, *args,
                 **kwargs):
        """
        :param model_cls: Model class (e.g. CNNModel) used for both networks
        :param batch_size: minibatch size sampled from the replay buffer
        :param epsilon: initial exploration rate
        :param epsilon_min: final exploration rate after annealing
        :param gamma: reward discount factor
        :param buffer_size: replay buffer capacity
        :param update_freq: steps between target-network syncs (read by caller)
        :param training_start: warm-up steps before learning (read by caller)
        """
        # Default configurations
        self.batch_size = batch_size
        self.epsilon = epsilon
        self.epsilon_min = epsilon_min
        self.gamma = gamma
        self.buffer_size = buffer_size
        self.update_freq = update_freq
        # Bug fix: this was hard-coded to 10000, silently ignoring the
        # `training_start` parameter.
        self.training_start = training_start
        # Default model config: two instances of the same architecture,
        # applied only when the caller supplied no config of their own.
        if config is None:
            config = {}
            config['model'] = [
                {'model_id': 'policy_model'},
                {'model_id': 'target_model'}
            ]
        super(DQNAgent, self).__init__(model_cls, observation_space, action_space, config, *args, **kwargs)
        # The two instances created from the config above.
        self.policy_model: CNNModel = self.model_instances[0]
        self.target_model: CNNModel = self.model_instances[1]
        self.update_target_model()
        # Compile only the policy model; the target net is never fitted.
        opt = RMSprop(learning_rate=0.0001)
        self.policy_model.model.compile(loss='huber_loss', optimizer=opt)
        # Initialize replay buffer
        self.memory = ReplayBuffer(buffer_size)

    def learn(self, *args, **kwargs) -> None:
        """Run one double-DQN update on a minibatch from the replay buffer."""
        states, actions, rewards, next_states, dones = self.memory.sample(self.batch_size)
        # Double DQN: the policy net selects the next action, the target
        # net evaluates it.
        next_action = np.argmax(self.policy_model.forward(next_states), axis=-1)
        target = rewards + (1 - dones) * self.gamma * self.target_model.forward(next_states)[
            np.arange(self.batch_size), next_action]
        # Train only the taken actions towards the bootstrapped target.
        target_f = self.target_model.forward(states)
        target_f[np.arange(self.batch_size), actions] = target
        self.policy_model.model.fit(states, target_f, epochs=1, verbose=1)

    def sample(self, state, *args, **kwargs):
        """Epsilon-greedy action selection for a single (unbatched) state."""
        if np.random.rand() <= self.epsilon:
            return np.random.randint(self.action_space)
        act_values = self.policy_model.forward(state[np.newaxis])
        return np.argmax(act_values[0])

    def memorize(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.memory.add(state, action, reward, next_state, done)

    def preprocess(self, state: Any, *args, **kwargs) -> Any:
        # Bug fix: `raise NotImplemented` raises a TypeError at runtime
        # (NotImplemented is a sentinel, not an exception class).
        raise NotImplementedError

    def update_target_model(self):
        """Copy policy-network weights into the target network."""
        self.target_model.set_weights(self.policy_model.get_weights())

    def set_weights(self, weights):
        """Install external weights into both networks."""
        self.policy_model.set_weights(weights)
        self.update_target_model()

    def get_weights(self):
        """Return the policy network's weights."""
        return self.policy_model.get_weights()

    def adjust_epsilon(self, current_step, total_steps):
        """Linearly anneal epsilon from 1 down to epsilon_min over
        total_steps (clamped at the minimum thereafter)."""
        fraction = min(1.0, float(current_step) / total_steps)
        self.epsilon = 1 + fraction * (self.epsilon_min - 1)

    def save(self, path, *args, **kwargs) -> None:
        """Save the full policy model (architecture + weights) to `path`."""
        self.policy_model.model.save(path)

    def load(self, path, *args, **kwargs) -> None:
        """Load a checkpoint previously written by `save`.

        Bug fix: Keras models have no `.load` method (the original raised
        AttributeError); the counterpart of `model.save(path)` is
        `tf.keras.models.load_model(path)`.
        """
        from tensorflow.keras.models import load_model
        self.policy_model.model = load_model(path)
        self.update_target_model()
|
{"/learner/agents/ppo/__init__.py": ["/learner/agents/ppo/ppo_agent.py", "/learner/agents/ppo/ppo_agent_keras.py"], "/learner/core/agent.py": ["/learner/core/model.py", "/learner/core/utils.py"], "/learner/agents/ppo/ppo_agent_keras.py": ["/core/__init__.py"], "/learner/models/tf_keras_model.py": ["/core/__init__.py"], "/actor/core/__init__.py": ["/actor/core/model.py"], "/learner/agents/dqn/dqn_agent_keras.py": ["/core/__init__.py"], "/learner/core/utils.py": ["/core/__init__.py"], "/learner/common.py": ["/core/__init__.py"], "/learner/core/__init__.py": ["/learner/core/agent.py", "/learner/core/env.py", "/learner/core/mem_pool.py", "/learner/core/model.py", "/learner/core/registry.py"], "/learner/core/model.py": ["/learner/core/utils.py"], "/learner/agents/dqn/__init__.py": ["/learner/agents/dqn/dqn_agent.py", "/learner/agents/dqn/dqn_agent_keras.py"], "/learner/agents/ppo/ppo_agent.py": ["/core/__init__.py"], "/actor/models/tf_v1_model.py": ["/core/__init__.py"], "/actor/envs/__init__.py": ["/actor/envs/atari/__init__.py", "/actor/envs/vec_env/__init__.py"], "/learner/agents/dqn/dqn_agent.py": ["/core/__init__.py"], "/core/agent.py": ["/core/model.py", "/core/utils.py"], "/core/model.py": ["/core/utils.py"], "/actor.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"], "/core/utils.py": ["/core/__init__.py"], "/core/__init__.py": ["/core/agent.py", "/core/model.py"], "/dqn/dqn_agent.py": ["/core/__init__.py", "/dqn/cnn_model.py"], "/dqn/cnn_model.py": ["/core/__init__.py"], "/learner.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"]}
|
19,676,882
|
AltmanD/rl-framework
|
refs/heads/main
|
/dqn/cnn_model.py
|
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Dense, Flatten
from core import Model
class CNNModel(Model):
    """Keras CNN network: conv stack -> flatten -> dense stack, with the
    final dense layer sized to `action_space` (linear activation)."""

    def __init__(self, observation_space, action_space, model_id='0', config=None, conv=None, fc=None,
                 *args, **kwargs):
        """
        :param observation_space: input shape for the first conv layer
        :param action_space: number of output units (one per action)
        :param conv: list of Conv2D kwargs dicts; defaults to a 2-layer stack
        :param fc: list of Dense kwargs dicts; defaults to 256 -> action_space
        """
        # Default configurations
        self.conv = [
            {'filters': 16, 'kernel_size': 8, 'strides': 4, 'activation': 'relu'},
            {'filters': 32, 'kernel_size': 4, 'strides': 2, 'activation': 'relu'},
        ] if conv is None else conv
        self.fc = [
            {'units': 256, 'activation': 'relu'},
            {'units': action_space, 'activation': 'linear'}
        ] if fc is None else fc
        # Define layers once; build() assembles them into a Sequential model.
        # NOTE(review): if build() ever runs twice (e.g. via a config reload),
        # the SAME layer objects are re-added to a fresh Sequential -- confirm
        # the installed Keras version tolerates layer reuse here.
        self.conv_layers = [Conv2D(**self.conv[0], input_shape=observation_space)]
        self.conv_layers += [Conv2D(**x) for x in self.conv[1:]]
        self.flatten = Flatten()
        self.dense_layers = [Dense(**x) for x in self.fc]
        self.model = None
        # Model.__init__ triggers build() (directly, or via load_config).
        super(CNNModel, self).__init__(observation_space, action_space, model_id, config, *args, **kwargs)

    def build(self):
        """Assemble the Sequential model from the pre-created layers."""
        self.model = Sequential()
        for conv_layer in self.conv_layers:
            self.model.add(conv_layer)
        self.model.add(self.flatten)
        for dense_layer in self.dense_layers:
            self.model.add(dense_layer)

    def set_weights(self, weights, *args, **kwargs):
        """Overwrite all layer weights (format of get_weights())."""
        self.model.set_weights(weights)

    def get_weights(self, *args, **kwargs):
        """Return the model's current weights."""
        return self.model.get_weights()

    def forward(self, states, *args, **kwargs):
        """Batched forward pass; returns one output row per input state."""
        return self.model.predict(states)
|
{"/learner/agents/ppo/__init__.py": ["/learner/agents/ppo/ppo_agent.py", "/learner/agents/ppo/ppo_agent_keras.py"], "/learner/core/agent.py": ["/learner/core/model.py", "/learner/core/utils.py"], "/learner/agents/ppo/ppo_agent_keras.py": ["/core/__init__.py"], "/learner/models/tf_keras_model.py": ["/core/__init__.py"], "/actor/core/__init__.py": ["/actor/core/model.py"], "/learner/agents/dqn/dqn_agent_keras.py": ["/core/__init__.py"], "/learner/core/utils.py": ["/core/__init__.py"], "/learner/common.py": ["/core/__init__.py"], "/learner/core/__init__.py": ["/learner/core/agent.py", "/learner/core/env.py", "/learner/core/mem_pool.py", "/learner/core/model.py", "/learner/core/registry.py"], "/learner/core/model.py": ["/learner/core/utils.py"], "/learner/agents/dqn/__init__.py": ["/learner/agents/dqn/dqn_agent.py", "/learner/agents/dqn/dqn_agent_keras.py"], "/learner/agents/ppo/ppo_agent.py": ["/core/__init__.py"], "/actor/models/tf_v1_model.py": ["/core/__init__.py"], "/actor/envs/__init__.py": ["/actor/envs/atari/__init__.py", "/actor/envs/vec_env/__init__.py"], "/learner/agents/dqn/dqn_agent.py": ["/core/__init__.py"], "/core/agent.py": ["/core/model.py", "/core/utils.py"], "/core/model.py": ["/core/utils.py"], "/actor.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"], "/core/utils.py": ["/core/__init__.py"], "/core/__init__.py": ["/core/agent.py", "/core/model.py"], "/dqn/dqn_agent.py": ["/core/__init__.py", "/dqn/cnn_model.py"], "/dqn/cnn_model.py": ["/core/__init__.py"], "/learner.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"]}
|
19,676,883
|
AltmanD/rl-framework
|
refs/heads/main
|
/learner.py
|
import zmq
from dqn.atari import AtariEnv
from dqn.cnn_model import CNNModel
from dqn.dqn_agent import DQNAgent
from dqn.protobuf.data import Data, bytes2arr
def main():
    """Learner loop: receive transitions from the actor over ZMQ and train
    the DQN agent on them."""
    context = zmq.Context()
    # REQ pairs with the actor's REP socket: we send a weight payload and
    # receive one serialized transition in reply.
    socket = context.socket(zmq.REQ)
    socket.connect("tcp://127.0.0.1:5000")
    # Env is created only to obtain observation/action space shapes.
    env = AtariEnv('PongNoFrameskip-v4', 4)
    timesteps = 1000000
    dqn_agent = DQNAgent(
        CNNModel,
        env.get_observation_space(),
        env.get_action_space()
    )
    # An empty payload tells the actor "no weight update this round".
    # NOTE(review): `weight` is never populated with trained weights
    # anywhere in this loop, so the actor never actually receives updates
    # -- confirm whether a dqn_agent.get_weights() send is missing.
    weight = b''
    for step in range(timesteps):
        socket.send(weight)
        weight = b''
        data = Data()
        data.ParseFromString(socket.recv())
        state, next_state = bytes2arr(data.state), bytes2arr(data.next_state)
        dqn_agent.memorize(state, data.action, data.reward, next_state, data.done)
        # Train only after the warm-up period has filled the buffer.
        if step > dqn_agent.training_start:
            dqn_agent.learn()
        # Periodically sync the target network with the policy network.
        if step % dqn_agent.update_freq == 0:
            dqn_agent.update_target_model()
if __name__ == '__main__':
    main()
|
{"/learner/agents/ppo/__init__.py": ["/learner/agents/ppo/ppo_agent.py", "/learner/agents/ppo/ppo_agent_keras.py"], "/learner/core/agent.py": ["/learner/core/model.py", "/learner/core/utils.py"], "/learner/agents/ppo/ppo_agent_keras.py": ["/core/__init__.py"], "/learner/models/tf_keras_model.py": ["/core/__init__.py"], "/actor/core/__init__.py": ["/actor/core/model.py"], "/learner/agents/dqn/dqn_agent_keras.py": ["/core/__init__.py"], "/learner/core/utils.py": ["/core/__init__.py"], "/learner/common.py": ["/core/__init__.py"], "/learner/core/__init__.py": ["/learner/core/agent.py", "/learner/core/env.py", "/learner/core/mem_pool.py", "/learner/core/model.py", "/learner/core/registry.py"], "/learner/core/model.py": ["/learner/core/utils.py"], "/learner/agents/dqn/__init__.py": ["/learner/agents/dqn/dqn_agent.py", "/learner/agents/dqn/dqn_agent_keras.py"], "/learner/agents/ppo/ppo_agent.py": ["/core/__init__.py"], "/actor/models/tf_v1_model.py": ["/core/__init__.py"], "/actor/envs/__init__.py": ["/actor/envs/atari/__init__.py", "/actor/envs/vec_env/__init__.py"], "/learner/agents/dqn/dqn_agent.py": ["/core/__init__.py"], "/core/agent.py": ["/core/model.py", "/core/utils.py"], "/core/model.py": ["/core/utils.py"], "/actor.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"], "/core/utils.py": ["/core/__init__.py"], "/core/__init__.py": ["/core/agent.py", "/core/model.py"], "/dqn/dqn_agent.py": ["/core/__init__.py", "/dqn/cnn_model.py"], "/dqn/cnn_model.py": ["/core/__init__.py"], "/learner.py": ["/dqn/atari/__init__.py", "/dqn/cnn_model.py", "/dqn/dqn_agent.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.