# RockPaperScissor/config/database.py
# Minimal placeholder for database configuration
import os
from pathlib import Path

# Base directory for data storage.
# NOTE(review): this mkdir runs at import time, so merely importing the
# config module creates a ./data directory relative to the current working
# directory — confirm that side effect is intended.
BASE_DATA_DIR = Path("data")
BASE_DATA_DIR.mkdir(parents=True, exist_ok=True)

# SQLite configuration. Keys mirror sqlite3.connect() keyword arguments.
# NOTE(review): not referenced by SQLStorage in this change (it hardcodes
# its own db_path) — presumably reserved for future wiring; verify.
SQLITE_CONFIG = {
    "db_path": str(BASE_DATA_DIR / "game_history.db"),
    "timeout": 5.0,
    "check_same_thread": False,
}

# Storage configuration for the combined (SQL + S3 + memory cache) backend.
STORAGE_CONFIG = {
    "primary": "combined",
    "cache_size": 1000,
    "auto_cleanup": True,
    "cleanup_interval": 3600  # 1 hour in seconds
}

# S3 configuration — every value comes from the environment; the bucket name
# falls back to a placeholder that must be overridden in real deployments.
S3_CONFIG = {
    "bucket_name": os.getenv("AWS_S3_BUCKET_NAME", "your-bucket-name"),
    "region_name": os.getenv("AWS_REGION", "us-east-1"),
    "access_key_id": os.getenv("AWS_ACCESS_KEY_ID"),
    "secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY")
}


# RockPaperScissor/game_cache/__init__.py
# Public surface of the game_cache package.
from .memory_cache import GameSessionCache  # This will be our DummyGameSessionCache
# from .llm_cache import LLMCache  # Add LLMCache later if/when LLMService is integrated

__all__ = [
    'GameSessionCache',
    # 'LLMCache',
]
# RockPaperScissor/game_cache/memory_cache.py
from typing import Dict, Any, Optional
from dataclasses import dataclass, field
from datetime import datetime


@dataclass
class GameData:
    """One recorded rock-paper-scissors round, as handled by the cache layer."""
    session_id: str
    player_move: str
    ai_move: str
    result: str          # 'player_win' | 'ai_win' | 'draw' (see update_session)
    ai_type: str
    ai_state: Dict[str, Any]
    # BUG FIX: the original used `created_at: datetime = datetime.now()`,
    # which is evaluated ONCE at class-definition time, so every instance
    # silently shared the same timestamp. default_factory re-evaluates per
    # instance, giving each round its actual creation time.
    created_at: datetime = field(default_factory=datetime.now)


class GameSessionCache:
    """In-memory, size-bounded cache of per-session game statistics.

    Each session entry tracks aggregate win/draw counters plus the full list
    of rounds played. When the number of cached sessions exceeds
    ``_max_cache_size``, the least-recently-updated session is evicted.
    """

    def __init__(self) -> None:
        self._sessions: Dict[str, Dict[str, Any]] = {}
        self._max_cache_size = 1000  # Maximum number of sessions to cache

    def update_session(self, session_id: str, game_data: Dict[str, Any]) -> None:
        """Record one round for ``session_id``, creating the session if needed.

        Args:
            session_id: Identifier of the game session.
            game_data: Round data. Must contain 'result', 'player_move' and
                'ai_move'; may contain 'ai_type' and 'ai_state'.
        """
        if session_id not in self._sessions:
            self._sessions[session_id] = {
                'total_rounds': 0,
                'player_wins': 0,
                'ai_wins': 0,
                'draws': 0,
                'rounds': [],
                'last_updated': datetime.now()
            }

        session = self._sessions[session_id]

        # Update aggregate stats. Any result that is not an explicit win for
        # either side is counted as a draw.
        session['total_rounds'] += 1
        if game_data['result'] == 'player_win':
            session['player_wins'] += 1
        elif game_data['result'] == 'ai_win':
            session['ai_wins'] += 1
        else:
            session['draws'] += 1

        # Append the per-round record.
        session['rounds'].append({
            'round_number': session['total_rounds'],
            'player_move': game_data['player_move'],
            'ai_move': game_data['ai_move'],
            'result': game_data['result'],
            'ai_type': game_data.get('ai_type', 'random'),
            'ai_state': game_data.get('ai_state', {}),
            'created_at': datetime.now()
        })

        # Track recency for LRU-style eviction.
        session['last_updated'] = datetime.now()

        # Evict the stalest session once the cache grows past its bound.
        if len(self._sessions) > self._max_cache_size:
            self._remove_oldest_session()

    def get_session(self, session_id: str) -> Optional[Dict[str, Any]]:
        """Return the cached session dict, or None if not cached."""
        return self._sessions.get(session_id)

    def _remove_oldest_session(self) -> None:
        """Evict the session with the oldest 'last_updated' timestamp."""
        if not self._sessions:
            return

        oldest_session = min(
            self._sessions.items(),
            key=lambda item: item[1]['last_updated']
        )
        del self._sessions[oldest_session[0]]

    def clear(self) -> None:
        """Drop all cached sessions."""
        self._sessions.clear()

    def remove_session(self, session_id: str) -> None:
        """Remove a single session from the cache, if present."""
        if session_id in self._sessions:
            del self._sessions[session_id]
name.""" + # Fallback to RandomAI if the requested model isn't found or if empty + return AI_MODELS.get(model_name.lower(), RandomAI()) \ No newline at end of file diff --git a/RockPaperScissor/models/__pycache__/__init__.cpython-310.pyc b/RockPaperScissor/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b364a04a31ac6dfbbd75b312eded2553e65069c5 Binary files /dev/null and b/RockPaperScissor/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/RockPaperScissor/models/__pycache__/__init__.cpython-313.pyc b/RockPaperScissor/models/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ae02dbe55c43e071aa94c4b60a2459b9500926d Binary files /dev/null and b/RockPaperScissor/models/__pycache__/__init__.cpython-313.pyc differ diff --git a/RockPaperScissor/models/__pycache__/adaptive_markov_ai.cpython-310.pyc b/RockPaperScissor/models/__pycache__/adaptive_markov_ai.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..951fc6c2aac3c0c3e834b0dff1440fb9c98649a5 Binary files /dev/null and b/RockPaperScissor/models/__pycache__/adaptive_markov_ai.cpython-310.pyc differ diff --git a/RockPaperScissor/models/__pycache__/adaptive_markov_ai.cpython-313.pyc b/RockPaperScissor/models/__pycache__/adaptive_markov_ai.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0522a0255df6781a906ff03be3c77190d2010d7b Binary files /dev/null and b/RockPaperScissor/models/__pycache__/adaptive_markov_ai.cpython-313.pyc differ diff --git a/RockPaperScissor/models/__pycache__/base_ai.cpython-310.pyc b/RockPaperScissor/models/__pycache__/base_ai.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac82e5f00b216a7662833cc889ea06fbf38c1a9e Binary files /dev/null and b/RockPaperScissor/models/__pycache__/base_ai.cpython-310.pyc differ diff --git 
# RockPaperScissor/models/adaptive_markov_ai.py
from typing import Dict, Any, Optional, Tuple
import math
import numpy as np

from .base_ai import BaseAI

class AdaptiveMarkovAI(BaseAI):
    """
    Adaptive RPS AI that uses entropy-based weighting between Markov and Frequency models.

    Two predictors live in the (externally persisted) model state: a
    first-order Markov transition model over the player's consecutive moves,
    and an overall move-frequency model. Their predictions are blended with
    weights derived from each model's Shannon entropy — the more confident
    (lower-entropy) model receives the larger weight.
    """
    def __init__(self, smoothing_factor=1.0, temperature=1.0):
        # smoothing_factor: Laplace-style pseudo-count used to initialize the
        #   count matrices (keeps all probabilities strictly positive).
        # temperature: sharpness of the entropy weighting; larger values push
        #   more weight onto the lower-entropy model.
        super().__init__()
        self.smoothing = smoothing_factor
        self.temperature = temperature

    def make_move(self, model_state: Optional[Dict[str, Any]] = None) -> Tuple[str, Dict[str, Any]]:
        """
        Generate AI's next move based on model state.

        Args:
            model_state: Dictionary containing:
                - markov_counts: Transition count matrix
                - frequency_counts: Overall frequency counts
                - player_last_move: The player's last move from previous round
                - smoothing: Smoothing factor for probability calculations
                - temperature: Temperature parameter for entropy weighting
                - last_lambdas: Last calculated model weights

        Returns:
            Tuple containing:
                - str: AI's chosen move (rock, paper, scissors)
                - Dict: Updated model state (ready for next round after player moves)

        NOTE(review): the count arrays taken from ``model_state`` are updated
        in place, so the caller's dict is mutated as a side effect — confirm
        the service layer expects that.
        """
        # Initialize model state if None (fresh session: uniform pseudo-counts).
        if model_state is None:
            model_state = {
                "player_last_move": None,
                "ai_last_move": None,
                "last_result": None,
                "markov_counts": np.ones((3, 3)) * self.smoothing,
                "frequency_counts": np.ones(3) * self.smoothing,
                "player_second_last_move": None,
                "smoothing": self.smoothing,
                "temperature": self.temperature,
                "last_lambdas": {"markov": 0.5, "freq": 0.5}
            }

        # Extract values from model state (missing keys fall back to defaults).
        markov_counts = model_state.get("markov_counts", np.ones((3, 3)) * self.smoothing)
        frequency_counts = model_state.get("frequency_counts", np.ones(3) * self.smoothing)
        player_last_move = model_state.get("player_last_move")
        player_second_last_move = model_state.get("player_second_last_move")
        smoothing = model_state.get("smoothing", self.smoothing)
        temperature = model_state.get("temperature", self.temperature)
        last_lambdas = model_state.get("last_lambdas", {"markov": 0.5, "freq": 0.5})

        # Define move mappings; `counters[m]` is the move that beats m.
        move_to_idx = {"rock": 0, "paper": 1, "scissors": 2}
        idx_to_move = {0: "rock", 1: "paper", 2: "scissors"}
        counters = {"rock": "paper", "paper": "scissors", "scissors": "rock"}

        # Helper functions
        def calculate_entropy(probs):
            """Calculate Shannon entropy (bits) of a probability distribution."""
            entropy = 0
            for p in probs:
                if p > 0:
                    entropy -= p * math.log2(p)
            return entropy

        def get_markov_probabilities(move):
            """Get transition probabilities from the Markov model."""
            if move not in move_to_idx:
                # Default to uniform if unknown move
                return [1/3, 1/3, 1/3]

            move_idx = move_to_idx[move]
            # row_sum > 0 whenever counts were seeded with a positive smoothing.
            row_sum = np.sum(markov_counts[move_idx])
            return markov_counts[move_idx] / row_sum

        def get_frequency_probabilities():
            """Get overall move probabilities from the frequency model."""
            total = np.sum(frequency_counts)
            return frequency_counts / total

        def calculate_lambdas(markov_probs, freq_probs):
            """Calculate adaptive weights using entropy-based softmax formula."""
            # Calculate entropies
            markov_entropy = calculate_entropy(markov_probs)
            freq_entropy = calculate_entropy(freq_probs)

            # Apply temperature and calculate weights: softmax over the
            # negated entropies, so lower entropy -> larger lambda.
            denom = math.exp(-temperature * markov_entropy) + math.exp(-temperature * freq_entropy)
            lambda_markov = math.exp(-temperature * markov_entropy) / denom
            lambda_freq = math.exp(-temperature * freq_entropy) / denom

            # Return weights and entropy values for monitoring
            return lambda_markov, lambda_freq, {
                "markov": lambda_markov,
                "freq": lambda_freq,
                "markov_entropy": markov_entropy,
                "freq_entropy": freq_entropy
            }

        # Update the models with historical data if available (in-place on
        # the arrays extracted above).
        if player_last_move and player_last_move in move_to_idx:
            # Update frequency counts
            last_idx = move_to_idx[player_last_move]
            frequency_counts[last_idx] += 1

            # Update Markov model if we have two consecutive moves
            if player_second_last_move and player_second_last_move in move_to_idx:
                second_last_idx = move_to_idx[player_second_last_move]
                markov_counts[second_last_idx][last_idx] += 1

        # For prediction, use player_last_move for Markov model
        if player_last_move is None:
            # No history yet, use random move (not counter)
            ai_move = np.random.choice(self.possible_moves)
        else:
            # Get probabilities from each model
            markov_probs = get_markov_probabilities(player_last_move)
            freq_probs = get_frequency_probabilities()

            # Calculate adaptive lambda weights
            lambda_markov, lambda_freq, new_lambdas = calculate_lambdas(markov_probs, freq_probs)

            # Combine predictions with lambda weights
            combined_probs = lambda_markov * np.array(markov_probs) + lambda_freq * np.array(freq_probs)

            # Predict the player's most likely move, then play its counter.
            predicted_idx = np.argmax(combined_probs)
            predicted_move = idx_to_move[predicted_idx]
            ai_move = counters[predicted_move]

            # Update lambdas in state
            last_lambdas = new_lambdas

        # Prepare updated state
        updated_state = {
            "markov_counts": markov_counts,
            "frequency_counts": frequency_counts,
            "player_last_move": None,  # Keep until service layer updates it
            "player_second_last_move": player_last_move,  # Keep until service layer updates it
            "smoothing": smoothing,
            "temperature": temperature,
            "last_lambdas": last_lambdas
        }

        # Return AI move and updated state
        return ai_move, updated_state
# RockPaperScissor/models/random_ai.py
import random
from typing import Dict, Any, Tuple, Optional
from .base_ai import BaseAI  # base_ai.py lives in the same package


class RandomAI(BaseAI):
    """Baseline opponent that picks uniformly at random and keeps no memory."""

    def make_move(self, model_state: Optional[Dict[str, Any]] = None) -> Tuple[str, Dict[str, Any]]:
        """Return a uniformly random move plus an empty (stateless) model state.

        The incoming ``model_state`` is deliberately ignored: this AI carries
        no information between rounds, so the returned state is always ``{}``.
        """
        chosen_move = random.choice(self.possible_moves)
        return chosen_move, {}
S3Storage # Add later if needed + +__all__ = [ + 'Storage', 'StorageError', + 'SQLStorage', + 'CombinedStorage', # Uncommented for S3 testing + 'S3Storage', +] \ No newline at end of file diff --git a/RockPaperScissor/repositories/__pycache__/__init__.cpython-310.pyc b/RockPaperScissor/repositories/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bda7c80c4b1ae313b33db0d1cacaa8e911ec95ca Binary files /dev/null and b/RockPaperScissor/repositories/__pycache__/__init__.cpython-310.pyc differ diff --git a/RockPaperScissor/repositories/__pycache__/__init__.cpython-313.pyc b/RockPaperScissor/repositories/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d224c9f445f8de5874ed3a33bedf1aa8a4da87a Binary files /dev/null and b/RockPaperScissor/repositories/__pycache__/__init__.cpython-313.pyc differ diff --git a/RockPaperScissor/repositories/__pycache__/combined_storage.cpython-310.pyc b/RockPaperScissor/repositories/__pycache__/combined_storage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d048a556762197b7533058fdd509d8ecd7b2b538 Binary files /dev/null and b/RockPaperScissor/repositories/__pycache__/combined_storage.cpython-310.pyc differ diff --git a/RockPaperScissor/repositories/__pycache__/combined_storage.cpython-313.pyc b/RockPaperScissor/repositories/__pycache__/combined_storage.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..342b771958a2258a645d6cfb60823eca104fc98d Binary files /dev/null and b/RockPaperScissor/repositories/__pycache__/combined_storage.cpython-313.pyc differ diff --git a/RockPaperScissor/repositories/__pycache__/s3_storage.cpython-310.pyc b/RockPaperScissor/repositories/__pycache__/s3_storage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdbde74370d3407648084a3d8fed1762098cbc4b Binary files /dev/null and 
b/RockPaperScissor/repositories/__pycache__/s3_storage.cpython-310.pyc differ diff --git a/RockPaperScissor/repositories/__pycache__/s3_storage.cpython-313.pyc b/RockPaperScissor/repositories/__pycache__/s3_storage.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a007ae18218ddc530f2c5ec93a0acb3bb6ec5c10 Binary files /dev/null and b/RockPaperScissor/repositories/__pycache__/s3_storage.cpython-313.pyc differ diff --git a/RockPaperScissor/repositories/__pycache__/sql_storage.cpython-310.pyc b/RockPaperScissor/repositories/__pycache__/sql_storage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12b755d7eda8dd82ad68a18b86f80aecfd74088f Binary files /dev/null and b/RockPaperScissor/repositories/__pycache__/sql_storage.cpython-310.pyc differ diff --git a/RockPaperScissor/repositories/__pycache__/sql_storage.cpython-313.pyc b/RockPaperScissor/repositories/__pycache__/sql_storage.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3078608c21f2952c8b37f0058e860dded6753e8e Binary files /dev/null and b/RockPaperScissor/repositories/__pycache__/sql_storage.cpython-313.pyc differ diff --git a/RockPaperScissor/repositories/__pycache__/storage.cpython-310.pyc b/RockPaperScissor/repositories/__pycache__/storage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33add72e77eb0f5512b1a385885e1d561409143c Binary files /dev/null and b/RockPaperScissor/repositories/__pycache__/storage.cpython-310.pyc differ diff --git a/RockPaperScissor/repositories/__pycache__/storage.cpython-313.pyc b/RockPaperScissor/repositories/__pycache__/storage.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..464270a969eca279bfbe0f2e8543ecc0dfe2f870 Binary files /dev/null and b/RockPaperScissor/repositories/__pycache__/storage.cpython-313.pyc differ diff --git a/RockPaperScissor/repositories/combined_storage.py 
# RockPaperScissor/repositories/combined_storage.py
from .storage import Storage, StorageError
from .sql_storage import SQLStorage
from .s3_storage import S3Storage
from typing import Dict, Any, Optional
import os

class CombinedStorage(Storage):
    """Write-through facade over SQL and S3 storage with an in-memory read
    cache for full session histories.

    Writes go to both backends; a write is reported successful if either
    backend succeeds. Reads prefer the in-memory cache, then SQL, then S3.
    """

    def __init__(self, db_path: str = "data/game_history.db"):
        self.sql_storage = SQLStorage(db_path)
        self.s3_storage = S3Storage()
        # Maps session_id -> full game-history dict (exactly the shape
        # returned by get_game_history). Only complete histories are cached.
        self._memory_cache = {}

    async def initialize(self):
        """Initialize storage components"""
        await self.sql_storage.initialize()
        await self.s3_storage.initialize()

    async def save_game_round(self, game_data: Dict[str, Any]) -> bool:
        """Save game round to both storage systems.

        Returns True if at least one backend accepted the write.

        BUG FIX: the original stored the single-round ``game_data`` dict in
        ``_memory_cache[session_id]``, so a later ``get_game_history`` call
        returned that lone round dict as if it were the full session history.
        The cache entry is now invalidated instead; the next read rebuilds
        the real history from storage.
        """
        # Save to SQL storage
        sql_success = await self.sql_storage.save_game_round(game_data)

        # Save to S3 storage
        s3_success = await self.s3_storage.save_game_round(game_data)

        if sql_success or s3_success:
            session_id = game_data.get('game_id')
            if session_id:
                # Any cached history for this session is now stale.
                self._memory_cache.pop(session_id, None)

        return sql_success or s3_success

    async def save_full_session(self, session_id: str, session_data: Dict[str, Any]) -> bool:
        """Save the complete game session to both storage systems."""
        # Save to SQL storage
        sql_success = await self.sql_storage.save_full_session(session_id, session_data)

        # Save to S3 storage
        s3_success = await self.s3_storage.save_full_session(session_id, session_data)

        if sql_success or s3_success:
            # Keep the read cache consistent with the new snapshot.
            self._memory_cache.pop(session_id, None)

        return sql_success or s3_success

    async def get_game_history(self, session_id: str) -> Optional[Dict[str, Any]]:
        """Get game history from cache, then SQL, then S3 (first hit wins)."""
        # Try cache first
        if session_id in self._memory_cache:
            return self._memory_cache[session_id]

        # Try SQL storage
        history = await self.sql_storage.get_game_history(session_id)
        if history:
            self._memory_cache[session_id] = history
            return history

        # Try S3 storage
        history = await self.s3_storage.get_game_history(session_id)
        if history:
            self._memory_cache[session_id] = history
            return history

        return None

    async def get_ai_state(self, session_id: str, ai_type: str) -> Optional[Dict[str, Any]]:
        """Get AI state from SQL storage (the authoritative source for it)."""
        return await self.sql_storage.get_ai_state(session_id, ai_type)

    async def close(self) -> None:
        """Close storage connections and drop cached data."""
        await self.sql_storage.close()
        await self.s3_storage.close()
        self._memory_cache.clear()
# RockPaperScissor/repositories/s3_storage.py
import boto3
import json
import sqlite3
import io
from datetime import datetime
from typing import Dict, Any, Optional
from .storage import Storage
import os
import numpy as np

class S3Storage(Storage):
    """Game-history storage backed by S3.

    Each session is persisted as one S3 object holding a SQL text dump of an
    in-memory SQLite database: every read rehydrates the dump into a fresh
    in-memory database, and every write re-serializes and re-uploads it.
    """

    def __init__(self, bucket_name: str = None):
        # Bucket comes from the argument or the AWS_S3_BUCKET_NAME env var.
        self.bucket_name = bucket_name or os.getenv('AWS_S3_BUCKET_NAME')
        if not self.bucket_name:
            raise ValueError("S3 bucket name must be provided either through constructor or AWS_S3_BUCKET_NAME environment variable")

        # Configure S3 client with LocalStack endpoint if available
        # (endpoint_url is None when AWS_ENDPOINT_URL is unset, so boto3
        # falls back to real AWS).
        endpoint_url = os.getenv('AWS_ENDPOINT_URL')
        self.s3_client = boto3.client(
            's3',
            endpoint_url=endpoint_url,
            region_name=os.getenv('AWS_REGION', 'us-east-1')
        )

    def convert_ndarray(self, obj):
        # Recursively replace numpy arrays with plain lists so the AI state
        # can be serialized with json.dumps.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, dict):
            return {k: self.convert_ndarray(v) for k, v in obj.items()}
        if isinstance(obj, list):
            return [self.convert_ndarray(x) for x in obj]
        return obj

    async def initialize(self):
        """Initialize S3 storage - verify bucket exists"""
        try:
            self.s3_client.head_bucket(Bucket=self.bucket_name)
        except Exception as e:
            # If using LocalStack, create the bucket if it doesn't exist
            if os.getenv('AWS_ENDPOINT_URL'):
                try:
                    self.s3_client.create_bucket(Bucket=self.bucket_name)
                except Exception as create_error:
                    raise Exception(f"Failed to create S3 bucket {self.bucket_name}: {str(create_error)}")
            else:
                raise Exception(f"Failed to access S3 bucket {self.bucket_name}: {str(e)}")

    def _get_db_connection(self, session_id: str) -> sqlite3.Connection:
        """Get SQLite database connection for a session.

        Returns an in-memory database: either rehydrated from the session's
        SQL dump in S3, or freshly created with the expected schema when no
        dump exists yet.
        """
        # Create in-memory database
        conn = sqlite3.connect(':memory:')
        cursor = conn.cursor()

        # Try to load existing data from S3
        try:
            s3_key = f"game_sessions/{session_id}/game.db"
            response = self.s3_client.get_object(
                Bucket=self.bucket_name,
                Key=s3_key
            )
            db_data = response['Body'].read()
            # NOTE(review): despite the ".db" key, the stored object is the
            # SQL *text* dump written by _save_db_to_s3, so it is replayed
            # with executescript rather than opened as a binary database.
            conn.executescript(db_data.decode('utf-8'))
        except self.s3_client.exceptions.NoSuchKey:
            # No existing data, start with empty tables
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS game_sessions (
                    session_id TEXT PRIMARY KEY,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    completed_at TIMESTAMP,
                    total_rounds INTEGER DEFAULT 0,
                    player_wins INTEGER DEFAULT 0,
                    ai_wins INTEGER DEFAULT 0,
                    draws INTEGER DEFAULT 0,
                    is_completed BOOLEAN DEFAULT FALSE
                )
            ''')

            cursor.execute('''
                CREATE TABLE IF NOT EXISTS game_rounds (
                    round_id INTEGER PRIMARY KEY AUTOINCREMENT,
                    session_id TEXT,
                    round_number INTEGER,
                    player_move TEXT,
                    ai_move TEXT,
                    result TEXT,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    FOREIGN KEY (session_id) REFERENCES game_sessions(session_id)
                )
            ''')

            cursor.execute('''
                CREATE TABLE IF NOT EXISTS ai_states (
                    session_id TEXT,
                    ai_type TEXT,
                    state_data TEXT,
                    last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    PRIMARY KEY (session_id, ai_type),
                    FOREIGN KEY (session_id) REFERENCES game_sessions(session_id)
                )
            ''')

        return conn

    def _save_db_to_s3(self, conn: sqlite3.Connection, session_id: str):
        """Save SQLite database to S3 (as a SQL text dump, one object per session)."""
        buffer = io.BytesIO()
        for line in conn.iterdump():
            buffer.write(f'{line}\n'.encode('utf-8'))
        buffer.seek(0)

        s3_key = f"game_sessions/{session_id}/game.db"
        self.s3_client.put_object(
            Bucket=self.bucket_name,
            Key=s3_key,
            Body=buffer.getvalue()
        )

    async def save_game_round(self, game_data: Dict[str, Any]) -> bool:
        """Save a game round to S3.

        Rehydrates the session database, applies the round (session counters,
        round row, optional AI state), and re-uploads the dump. Returns False
        on any failure (errors are printed, not raised).
        """
        try:
            session_id = game_data.get('game_id')
            if not session_id:
                return False

            conn = self._get_db_connection(session_id)
            cursor = conn.cursor()

            # Update or insert game session (upsert keeps running totals).
            cursor.execute('''
                INSERT INTO game_sessions (session_id, total_rounds, player_wins, ai_wins, draws)
                VALUES (?, 1, ?, ?, ?)
                ON CONFLICT(session_id) DO UPDATE SET
                    total_rounds = total_rounds + 1,
                    player_wins = player_wins + ?,
                    ai_wins = ai_wins + ?,
                    draws = draws + ?
            ''', (
                session_id,
                1 if game_data['result'] == 'player_win' else 0,
                1 if game_data['result'] == 'ai_win' else 0,
                1 if game_data['result'] == 'draw' else 0,
                1 if game_data['result'] == 'player_win' else 0,
                1 if game_data['result'] == 'ai_win' else 0,
                1 if game_data['result'] == 'draw' else 0
            ))

            # Get current round number (existing rounds + 1).
            cursor.execute('''
                SELECT COUNT(*) FROM game_rounds WHERE session_id = ?
            ''', (session_id,))
            round_number = cursor.fetchone()[0] + 1

            # Insert game round
            cursor.execute('''
                INSERT INTO game_rounds (session_id, round_number, player_move, ai_move, result)
                VALUES (?, ?, ?, ?, ?)
            ''', (
                session_id,
                round_number,
                game_data['player_move'],
                game_data['ai_move'],
                game_data['result']
            ))

            # Save AI state if provided (numpy arrays converted for JSON).
            if 'ai_state' in game_data:
                ai_state_serializable = json.dumps(self.convert_ndarray(game_data['ai_state']))
                cursor.execute('''
                    INSERT INTO ai_states (session_id, ai_type, state_data)
                    VALUES (?, ?, ?)
                    ON CONFLICT(session_id, ai_type) DO UPDATE SET
                        state_data = ?,
                        last_updated = CURRENT_TIMESTAMP
                ''', (
                    session_id,
                    game_data.get('ai_type', 'adaptive_markov'),
                    ai_state_serializable,
                    ai_state_serializable
                ))

            # Save to S3
            self._save_db_to_s3(conn, session_id)
            conn.close()

            return True

        except Exception as e:
            # NOTE(review): broad catch collapses every failure into False;
            # callers cannot distinguish bad input from an S3 outage.
            print(f"Error saving game round to S3: {e}")
            return False

    async def save_full_session(self, session_id: str, session_data: Dict[str, Any]) -> bool:
        """Save the complete game session to S3.

        Overwrites the session summary, replaces all round rows, and persists
        the AI state from the final round. Returns False on failure.
        """
        try:
            conn = self._get_db_connection(session_id)
            cursor = conn.cursor()

            # Insert or update the session summary (marked completed).
            cursor.execute('''
                INSERT INTO game_sessions (session_id, total_rounds, player_wins, ai_wins, draws, is_completed, completed_at)
                VALUES (?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
                ON CONFLICT(session_id) DO UPDATE SET
                    total_rounds = excluded.total_rounds,
                    player_wins = excluded.player_wins,
                    ai_wins = excluded.ai_wins,
                    draws = excluded.draws,
                    is_completed = excluded.is_completed,
                    completed_at = excluded.completed_at
            ''', (
                session_id,
                session_data['total_rounds'],
                session_data['player_wins'],
                session_data['ai_wins'],
                session_data['draws'],
                True
            ))

            # Delete existing rounds for this session (full replace).
            cursor.execute('DELETE FROM game_rounds WHERE session_id = ?', (session_id,))

            # NOTE(review): leftover debug print — consider removing or
            # switching to logging before production.
            print(f"[DEBUG] session_data['rounds']: {session_data['rounds']}")
            # Insert all rounds
            for round_data in session_data['rounds']:
                created_at = round_data.get('created_at', datetime.now().isoformat())
                cursor.execute('''
                    INSERT INTO game_rounds (session_id, round_number, player_move, ai_move, result, created_at)
                    VALUES (?, ?, ?, ?, ?, ?)
                ''', (
                    session_id,
                    round_data['round_number'],
                    round_data['player_move'],
                    round_data['ai_move'],
                    round_data['result'],
                    created_at
                ))

            # Save AI state if available (taken from the last round played).
            if session_data['rounds']:
                last_round = session_data['rounds'][-1]
                ai_type = last_round.get('ai_type', 'random')
                ai_state = last_round.get('ai_state', {})
                ai_state_serializable = json.dumps(self.convert_ndarray(ai_state))
                cursor.execute('''
                    INSERT INTO ai_states (session_id, ai_type, state_data)
                    VALUES (?, ?, ?)
                    ON CONFLICT(session_id, ai_type) DO UPDATE SET
                        state_data = ?,
                        last_updated = CURRENT_TIMESTAMP
                ''', (
                    session_id,
                    ai_type,
                    ai_state_serializable,
                    ai_state_serializable
                ))

            # Save to S3
            self._save_db_to_s3(conn, session_id)
            conn.close()

            return True

        except Exception as e:
            print(f"Error saving full session to S3: {e}")
            return False

    async def get_game_history(self, session_id: str) -> Optional[Dict[str, Any]]:
        """Retrieve game history for a session from S3.

        Returns the session summary plus all rounds ordered by round number,
        or None when the session does not exist or retrieval fails.
        """
        try:
            conn = self._get_db_connection(session_id)
            cursor = conn.cursor()

            # Get session stats
            cursor.execute('''
                SELECT total_rounds, player_wins, ai_wins, draws, is_completed, completed_at
                FROM game_sessions
                WHERE session_id = ?
            ''', (session_id,))
            session_data = cursor.fetchone()

            if not session_data:
                conn.close()
                return None

            # Get all rounds
            cursor.execute('''
                SELECT round_number, player_move, ai_move, result, created_at
                FROM game_rounds
                WHERE session_id = ?
                ORDER BY round_number
            ''', (session_id,))
            rounds = cursor.fetchall()

            result = {
                'session_id': session_id,
                'total_rounds': session_data[0],
                'player_wins': session_data[1],
                'ai_wins': session_data[2],
                'draws': session_data[3],
                'is_completed': session_data[4],
                'completed_at': session_data[5],
                'rounds': [
                    {
                        'round_number': r[0],
                        'player_move': r[1],
                        'ai_move': r[2],
                        'result': r[3],
                        'created_at': r[4]
                    }
                    for r in rounds
                ]
            }

            conn.close()
            return result

        except Exception as e:
            print(f"Error retrieving game history from S3: {e}")
            return None

    async def close(self) -> None:
        """Close S3 client connection (drops the reference; boto3 clients
        need no explicit shutdown)."""
        self.s3_client = None
class SQLStorage(Storage):
    """SQLite-backed implementation of the Storage interface.

    Persists game sessions, per-round history and serialized AI state in a
    single local database file. Call `initialize()` before use and
    `close()` when done.

    NOTE(review): sqlite3 is a synchronous driver, so these `async` methods
    block the event loop for the duration of each query — confirm this is
    acceptable for the deployment's request volume.
    """

    def __init__(self, db_path: str = "data/game_history.db"):
        # Path to the SQLite file; the parent directory is created eagerly.
        self.db_path = db_path
        self.conn = None
        self._ensure_db_directory()

    def _ensure_db_directory(self):
        """Create the database's parent directory if it does not exist."""
        Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def _to_serializable(obj):
        """Recursively replace numpy arrays with plain lists so json.dumps works.

        Shared by save_game_round and save_full_session (previously each
        carried its own identical nested copy of this function).
        """
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, dict):
            return {k: SQLStorage._to_serializable(v) for k, v in obj.items()}
        if isinstance(obj, list):
            return [SQLStorage._to_serializable(x) for x in obj]
        return obj

    async def initialize(self):
        """Open the connection and create the schema if it does not exist."""
        self.conn = sqlite3.connect(self.db_path)
        cursor = self.conn.cursor()

        # One row per play session, with aggregate counters.
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS game_sessions (
                session_id TEXT PRIMARY KEY,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                completed_at TIMESTAMP,
                total_rounds INTEGER DEFAULT 0,
                player_wins INTEGER DEFAULT 0,
                ai_wins INTEGER DEFAULT 0,
                draws INTEGER DEFAULT 0,
                is_completed BOOLEAN DEFAULT FALSE
            )
        ''')

        # One row per round played within a session.
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS game_rounds (
                round_id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT,
                round_number INTEGER,
                player_move TEXT,
                ai_move TEXT,
                result TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (session_id) REFERENCES game_sessions(session_id)
            )
        ''')

        # Latest serialized AI state per (session, ai_type).
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS ai_states (
                session_id TEXT,
                ai_type TEXT,
                state_data TEXT,
                last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                PRIMARY KEY (session_id, ai_type),
                FOREIGN KEY (session_id) REFERENCES game_sessions(session_id)
            )
        ''')

        self.conn.commit()

    async def save_game_round(self, game_data: Dict[str, Any]) -> bool:
        """Record one round and update the session's aggregate counters.

        Args:
            game_data: Dict with 'game_id' (used as session id),
                'player_move', 'ai_move', 'result', and optionally
                'ai_type' / 'ai_state'.

        Returns:
            True on success, False (after rollback) on any error.
        """
        try:
            cursor = self.conn.cursor()
            session_id = game_data.get('game_id')

            is_player_win = 1 if game_data['result'] == 'player_win' else 0
            is_ai_win = 1 if game_data['result'] == 'ai_win' else 0
            is_draw = 1 if game_data['result'] == 'draw' else 0

            # Upsert: `excluded` re-uses the inserted flags, so each value
            # is bound once (previously the same flags were bound twice).
            cursor.execute('''
                INSERT INTO game_sessions (session_id, total_rounds, player_wins, ai_wins, draws)
                VALUES (?, 1, ?, ?, ?)
                ON CONFLICT(session_id) DO UPDATE SET
                    total_rounds = total_rounds + 1,
                    player_wins = player_wins + excluded.player_wins,
                    ai_wins = ai_wins + excluded.ai_wins,
                    draws = draws + excluded.draws
            ''', (session_id, is_player_win, is_ai_win, is_draw))

            # Next round number = rounds already stored + 1.
            # NOTE(review): COUNT(*)+1 is race-prone under concurrent writes
            # to the same session — confirm requests are serialized per session.
            cursor.execute(
                'SELECT COUNT(*) FROM game_rounds WHERE session_id = ?',
                (session_id,)
            )
            round_number = cursor.fetchone()[0] + 1

            cursor.execute('''
                INSERT INTO game_rounds (session_id, round_number, player_move, ai_move, result)
                VALUES (?, ?, ?, ?, ?)
            ''', (
                session_id,
                round_number,
                game_data['player_move'],
                game_data['ai_move'],
                game_data['result']
            ))

            # Persist the AI's opaque state alongside the round, if given.
            if 'ai_state' in game_data:
                state_json = json.dumps(self._to_serializable(game_data['ai_state']))
                cursor.execute('''
                    INSERT INTO ai_states (session_id, ai_type, state_data)
                    VALUES (?, ?, ?)
                    ON CONFLICT(session_id, ai_type) DO UPDATE SET
                        state_data = excluded.state_data,
                        last_updated = CURRENT_TIMESTAMP
                ''', (
                    session_id,
                    game_data.get('ai_type', 'adaptive_markov'),
                    state_json
                ))

            self.conn.commit()
            return True

        except Exception as e:
            print(f"Error saving game round: {e}")
            # Guard: initialize() may not have been called yet.
            if self.conn is not None:
                self.conn.rollback()
            return False

    async def complete_session(self, session_id: str) -> bool:
        """Mark a game session as completed with the current timestamp."""
        try:
            cursor = self.conn.cursor()
            cursor.execute('''
                UPDATE game_sessions
                SET is_completed = TRUE,
                    completed_at = CURRENT_TIMESTAMP
                WHERE session_id = ?
            ''', (session_id,))
            self.conn.commit()
            return True
        except Exception as e:
            print(f"Error completing session: {e}")
            if self.conn is not None:
                self.conn.rollback()
            return False

    async def get_game_history(self, session_id: str) -> Optional[Dict[str, Any]]:
        """Return the session summary plus ordered rounds, or None."""
        try:
            cursor = self.conn.cursor()

            cursor.execute('''
                SELECT total_rounds, player_wins, ai_wins, draws, is_completed, completed_at
                FROM game_sessions
                WHERE session_id = ?
            ''', (session_id,))
            session_data = cursor.fetchone()
            if not session_data:
                return None

            cursor.execute('''
                SELECT round_number, player_move, ai_move, result, created_at
                FROM game_rounds
                WHERE session_id = ?
                ORDER BY round_number
            ''', (session_id,))
            rounds = cursor.fetchall()

            return {
                'session_id': session_id,
                'total_rounds': session_data[0],
                'player_wins': session_data[1],
                'ai_wins': session_data[2],
                'draws': session_data[3],
                'is_completed': session_data[4],
                'completed_at': session_data[5],
                'rounds': [
                    {
                        'round_number': r[0],
                        'player_move': r[1],
                        'ai_move': r[2],
                        'result': r[3],
                        'created_at': r[4]
                    }
                    for r in rounds
                ]
            }

        except Exception as e:
            print(f"Error retrieving game history: {e}")
            return None

    async def get_ai_state(self, session_id: str, ai_type: str) -> Optional[Dict[str, Any]]:
        """Return the deserialized AI state for (session, ai_type), or None."""
        try:
            cursor = self.conn.cursor()
            cursor.execute('''
                SELECT state_data
                FROM ai_states
                WHERE session_id = ? AND ai_type = ?
            ''', (session_id, ai_type))

            result = cursor.fetchone()
            if result:
                return json.loads(result[0])
            return None

        except Exception as e:
            print(f"Error retrieving AI state: {e}")
            return None

    async def close(self) -> None:
        """Close the database connection (idempotent)."""
        if self.conn:
            self.conn.close()
            self.conn = None

    async def save_full_session(self, session_id: str, session_data: Dict[str, Any]) -> bool:
        """Save the full session summary and all its rounds in one transaction.

        Replaces any previously stored rounds for the session and upserts the
        AI state carried on the last round.

        Returns:
            True on success, False (after rollback) on any error.
        """
        try:
            cursor = self.conn.cursor()

            # Upsert the session summary from session_data.
            cursor.execute('''
                INSERT INTO game_sessions (session_id, total_rounds, player_wins, ai_wins, draws, is_completed, completed_at)
                VALUES (?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
                ON CONFLICT(session_id) DO UPDATE SET
                    total_rounds = excluded.total_rounds,
                    player_wins = excluded.player_wins,
                    ai_wins = excluded.ai_wins,
                    draws = excluded.draws,
                    is_completed = excluded.is_completed,
                    completed_at = excluded.completed_at
            ''', (
                session_id,
                session_data['total_rounds'],
                session_data['player_wins'],
                session_data['ai_wins'],
                session_data['draws'],
                True
            ))

            # Replace rounds wholesale so re-saving cannot duplicate rows.
            cursor.execute('DELETE FROM game_rounds WHERE session_id = ?', (session_id,))
            for round_data in session_data['rounds']:
                created_at = round_data.get('created_at', datetime.now().isoformat())
                cursor.execute('''
                    INSERT INTO game_rounds (session_id, round_number, player_move, ai_move, result, created_at)
                    VALUES (?, ?, ?, ?, ?, ?)
                ''', (
                    session_id,
                    round_data['round_number'],
                    round_data['player_move'],
                    round_data['ai_move'],
                    round_data['result'],
                    created_at
                ))

            # Persist the AI state from the most recent round, if any.
            if session_data['rounds']:
                last_round = session_data['rounds'][-1]
                state_json = json.dumps(
                    self._to_serializable(last_round.get('ai_state', {}))
                )
                cursor.execute('''
                    INSERT INTO ai_states (session_id, ai_type, state_data)
                    VALUES (?, ?, ?)
                    ON CONFLICT(session_id, ai_type) DO UPDATE SET
                        state_data = excluded.state_data,
                        last_updated = CURRENT_TIMESTAMP
                ''', (
                    session_id,
                    last_round.get('ai_type', 'random'),
                    state_json
                ))

            self.conn.commit()
            return True
        except Exception as e:
            print(f"Error saving full session: {e}")
            if self.conn is not None:
                self.conn.rollback()
            return False
+ ON CONFLICT(session_id, ai_type) DO UPDATE SET + state_data = ?, + last_updated = CURRENT_TIMESTAMP + ''', ( + session_id, + ai_type, + json.dumps(ai_state_serializable), + json.dumps(ai_state_serializable) + )) + self.conn.commit() + return True + except Exception as e: + print(f"Error saving full session: {e}") + self.conn.rollback() + return False \ No newline at end of file diff --git a/RockPaperScissor/repositories/storage.py b/RockPaperScissor/repositories/storage.py new file mode 100644 index 0000000000000000000000000000000000000000..aa8872d83f8f9ce269399aac17b62ccab4799e82 --- /dev/null +++ b/RockPaperScissor/repositories/storage.py @@ -0,0 +1,23 @@ +# RockPaperScissor/repositories/storage.py +from abc import ABC, abstractmethod +from typing import Dict, Any, Optional + +class StorageError(Exception): + pass + +class Storage(ABC): + async def initialize(self): # Add initialize method + pass + + @abstractmethod + async def save_game_round(self, game_data: Dict[str, Any]) -> bool: + pass + + # We don't need these for the minimal version with GameService not using user states yet + # def get_user_state(self, user_id: str) -> Optional[Dict[str, Any]]: + # return None + # def save_user_state(self, user_id: str, model_name: str, model_state: Dict[str, Any]) -> None: + # pass + + async def close(self) -> None: + pass \ No newline at end of file diff --git a/RockPaperScissor/routes/__init__.py b/RockPaperScissor/routes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..af5108239f04bb9264519613a049dd2022429f3d --- /dev/null +++ b/RockPaperScissor/routes/__init__.py @@ -0,0 +1,9 @@ +""" +Routes package initialization for RockPaperScissor game. +Contains API routes definitions. 
+""" +from .game import game_router + +__all__ = [ + 'game_router' +] \ No newline at end of file diff --git a/RockPaperScissor/routes/__pycache__/__init__.cpython-310.pyc b/RockPaperScissor/routes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18cdb94574fe622e72abc58fb4654e7fd456a6d4 Binary files /dev/null and b/RockPaperScissor/routes/__pycache__/__init__.cpython-310.pyc differ diff --git a/RockPaperScissor/routes/__pycache__/__init__.cpython-313.pyc b/RockPaperScissor/routes/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e615307d94c6d789f323065bd4b8fa452f94e2f4 Binary files /dev/null and b/RockPaperScissor/routes/__pycache__/__init__.cpython-313.pyc differ diff --git a/RockPaperScissor/routes/__pycache__/game.cpython-310.pyc b/RockPaperScissor/routes/__pycache__/game.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..029adbffea6bc56da7d7e853ef5ea7f2bdc47703 Binary files /dev/null and b/RockPaperScissor/routes/__pycache__/game.cpython-310.pyc differ diff --git a/RockPaperScissor/routes/__pycache__/game.cpython-313.pyc b/RockPaperScissor/routes/__pycache__/game.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..924acb5e948604e6a61d9588ba8e059b9f20f763 Binary files /dev/null and b/RockPaperScissor/routes/__pycache__/game.cpython-313.pyc differ diff --git a/RockPaperScissor/routes/__pycache__/game_o.cpython-310.pyc b/RockPaperScissor/routes/__pycache__/game_o.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7be21dae05f1bbdeb35bfca16662e09800553d0b Binary files /dev/null and b/RockPaperScissor/routes/__pycache__/game_o.cpython-310.pyc differ diff --git a/RockPaperScissor/routes/__pycache__/history.cpython-313.pyc b/RockPaperScissor/routes/__pycache__/history.cpython-313.pyc new file mode 100644 index 
# Router exposing the gameplay endpoints; imported by routes/__init__.py.
game_router = APIRouter()


@game_router.post("/play")
async def play_round(request: Request, game_request: GameRequest):
    """
    Play a round with the player's move (in-memory only for Hugging Face).
    """
    # BUG FIX: GameRequest declares the move field as `user_move`
    # (see schemas/game.py); accessing `game_request.player_move`
    # raised AttributeError on every request.
    result = await game_service.play_round(
        game_request.session_id, game_request.user_move, game_request.ai_type
    )
    if "error" in result:
        raise HTTPException(status_code=400, detail=f"GameService error: {result['error']}")
    return result


@game_router.post("/end")
async def end_game(request: Request):
    """
    End the current game session, persist it, then clear the in-memory cache.
    """
    try:
        data = await request.json()
        session_id = data.get('session_id')
        if not session_id:
            raise HTTPException(
                status_code=400,
                detail="No session ID provided"
            )
        # Save session to database before clearing cache.
        db_success = await game_service.save_session_to_db(session_id)
        # Clear in-memory data regardless of save outcome.
        await game_service.clear_session(session_id)
        if db_success:
            return {
                "status": "success",
                "message": "Game session ended and saved to database successfully",
                "game_history": None
            }
        return {
            "status": "error",
            "message": "Game session ended, but failed to save to database",
            "game_history": None
        }
    except HTTPException:
        # BUG FIX: the 400 raised above used to be caught by the generic
        # handler below and re-surfaced as an opaque 500.
        raise
    except Exception as e:
        logger.error(f"Error ending game session: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail="An error occurred while ending the game session"
        )
a/RockPaperScissor/schemas/__pycache__/game.cpython-313.pyc b/RockPaperScissor/schemas/__pycache__/game.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b804bd5f9a253bed437b6faba256f57298940836 Binary files /dev/null and b/RockPaperScissor/schemas/__pycache__/game.cpython-313.pyc differ diff --git a/RockPaperScissor/schemas/game.py b/RockPaperScissor/schemas/game.py new file mode 100644 index 0000000000000000000000000000000000000000..49dc7c0d7ca7ae8da350b2a4a3fa4542f65ec48d --- /dev/null +++ b/RockPaperScissor/schemas/game.py @@ -0,0 +1,23 @@ +# RockPaperScissor/schemas/game.py +from pydantic import BaseModel, Field +# from typing import Optional, Dict, Any, Literal # OLD - Pydantic might not re-export Literal +from typing import Optional, Dict, Any, Literal # NEW - Import directly from typing + +import uuid +from datetime import datetime + +class GameRequest(BaseModel): + user_id: Optional[str] = Field("test_user") + session_id: Optional[str] = Field("test_session") + game_id: str = Field(default_factory=lambda: "gr_game_" + str(uuid.uuid4())) + user_move: Optional[Literal["rock", "paper", "scissors"]] = None # Literal is used here + ai_type: Optional[str] = None + +class GameResponse(BaseModel): + game_id: str + user_id: str + session_id: str + user_move: str + ai_move: str + result: Literal["player_win", "ai_win", "draw"] # Literal is used here + session_stats: Dict[str, Any] \ No newline at end of file diff --git a/RockPaperScissor/services/LLM_service.py b/RockPaperScissor/services/LLM_service.py new file mode 100644 index 0000000000000000000000000000000000000000..68ae54fe3e1288ec99151c3ed196b5d8a7fb6c20 --- /dev/null +++ b/RockPaperScissor/services/LLM_service.py @@ -0,0 +1,124 @@ +from transformers import AutoModelForCausalLM, AutoTokenizer +import torch +from typing import List, Dict, Optional +import re + +class LLMService: + def __init__(self): + # Switch to Qwen model (local path) + self.model_name = 
"model_cache/models--Qwen--Qwen3-0.6B/snapshots/e6de91484c29aa9480d55605af694f39b081c455" + print("[LLMService] Loading Qwen tokenizer...") + self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) + print("[LLMService] Qwen tokenizer loaded.") + + # Check for MPS (Metal Performance Shaders) availability for Apple Silicon + if torch.backends.mps.is_available(): + device = "mps" + print("[LLMService] Using MPS (Metal) for GPU acceleration") + else: + device = "cpu" + print("[LLMService] MPS not available, falling back to CPU") + + print("[LLMService] Loading Qwen model...") + self.model = AutoModelForCausalLM.from_pretrained( + self.model_name, + torch_dtype=torch.bfloat16 if device == "mps" else torch.float32, + device_map={"": device} + ) + print(f"[LLMService] Qwen model loaded on {device}.") + + def _format_frequency_stats(self, stats: Dict) -> str: + """Format frequency statistics into a readable string.""" + total_moves = sum(stats.get('move_distribution', {}).values()) + if total_moves == 0: + return "No game data available." + + # Calculate percentages + frequencies = {} + for move, count in stats.get('move_distribution', {}).items(): + frequencies[move] = (count / total_moves) * 100 + + # Find the most common move + most_common = max(frequencies.items(), key=lambda x: x[1]) + counter_move = { + "rock": "paper", + "paper": "scissors", + "scissors": "rock" + }[most_common[0]] + + formatted = "AI Move Analysis:\n" + formatted += f"Rock: {frequencies.get('rock', 0):.0f}%\n" + formatted += f"Paper: {frequencies.get('paper', 0):.0f}%\n" + formatted += f"Scissors: {frequencies.get('scissors', 0):.0f}%\n\n" + formatted += f"Strategy Suggestion:\n" + formatted += f"The AI tends to play {most_common[0]} most often ({most_common[1]:.0f}% of the time). " + formatted += f"Consider playing {counter_move} more frequently to counter this pattern." 
+ + return formatted + + def _create_frequency_prompt(self, stats: Dict) -> str: + """Create a concise prompt for frequency-based analysis with a beacon example.""" + base_prompt = ( + "You are a helpful assistant that analyzes Rock-Paper-Scissors (RPS) AI opponent statistics to help players win. " + "Based on the provided AI move frequency stats, briefly recommend the best move for the player to counter the AI's most frequent move. " + "Your answer must start with '>>>', followed by a one-sentence explanation and then a new line with 'Recommendation: [Move]'.\n\n" + "Example:\n" + ">>> The AI most often plays Scissors (84% of the time), so you should play Rock to counter it, since Rock beats Scissors.\nRecommendation: Rock\n\n" + "AI Move Frequency Stats:\n" + ) + base_prompt += self._format_frequency_stats(stats) + return base_prompt + + def _clean_llm_output(self, output: str) -> str: + # Remove code block markers and markdown + output = re.sub(r'`{3,}', '', output) + output = re.sub(r'\*\*Final Answer\*\*', '', output, flags=re.IGNORECASE) + # Find the beacon and extract the answer + lines = [line.strip() for line in output.splitlines() if line.strip()] + for i, line in enumerate(lines): + if line.startswith('>>>'): + result = [line] + # Optionally include the next line if it's a recommendation + if i + 1 < len(lines) and lines[i + 1].lower().startswith('recommendation:'): + result.append(lines[i + 1]) + return '\n'.join(result) + # Fallback: previous cleaning logic + seen = set() + cleaned_lines = [] + for line in lines: + if line not in seen: + cleaned_lines.append(line) + seen.add(line) + for line in cleaned_lines: + if line.lower().startswith('recommendation:'): + return line + return '\n'.join(cleaned_lines) + + async def generate_response(self, prompt: str, stats: Optional[Dict] = None) -> str: + print("[LLMService] generate_response called.") + if stats: + print(f"[LLMService] Using stats for prompt: {stats}") + prompt = 
self._create_frequency_prompt(stats) + else: + print(f"[LLMService] Using raw prompt: {prompt}") + try: + print("[LLMService] Model generation started...") + inputs = self.tokenizer([prompt], return_tensors="pt") + with torch.no_grad(): + outputs = self.model.generate( + **inputs, + max_new_tokens=300, # Increased for structured response + temperature=0.4, + top_p=0.8, + do_sample=True + ) + response = self.tokenizer.decode(outputs[0], skip_special_tokens=True) + print("[LLMService] Model generation finished.") + # Clean up the output before returning + return self._clean_llm_output(response) + except Exception as e: + print(f"[LLMService] Error during model generation: {e}") + raise + + async def close(self): + pass # No async close needed for local inferencedocke \ No newline at end of file diff --git a/RockPaperScissor/services/__init__.py b/RockPaperScissor/services/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..089e56bfbcc91e355be478c3eb135caa2217a4a9 --- /dev/null +++ b/RockPaperScissor/services/__init__.py @@ -0,0 +1,8 @@ +# RockPaperScissor/services/__init__.py +from .game_service_class import GameService +# from .llm_service import LLMService # Add LLMService later + +__all__ = [ + 'GameService', + # 'LLMService', +] \ No newline at end of file diff --git a/RockPaperScissor/services/__pycache__/LLM_service.cpython-310.pyc b/RockPaperScissor/services/__pycache__/LLM_service.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50b6e667034b3ae27c0df39ddd18ffd6ac27825e Binary files /dev/null and b/RockPaperScissor/services/__pycache__/LLM_service.cpython-310.pyc differ diff --git a/RockPaperScissor/services/__pycache__/LLM_service.cpython-313.pyc b/RockPaperScissor/services/__pycache__/LLM_service.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..630372a19abf88e184ed094de55755069cdc01a6 Binary files /dev/null and 
b/RockPaperScissor/services/__pycache__/LLM_service.cpython-313.pyc differ diff --git a/RockPaperScissor/services/__pycache__/__init__.cpython-310.pyc b/RockPaperScissor/services/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd413b75c8bedd80dc54ec00793659b116d1ae9d Binary files /dev/null and b/RockPaperScissor/services/__pycache__/__init__.cpython-310.pyc differ diff --git a/RockPaperScissor/services/__pycache__/__init__.cpython-313.pyc b/RockPaperScissor/services/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05d9ce5bbff7bb20a9548104cde4a10f7578b3b5 Binary files /dev/null and b/RockPaperScissor/services/__pycache__/__init__.cpython-313.pyc differ diff --git a/RockPaperScissor/services/__pycache__/game_service.cpython-313.pyc b/RockPaperScissor/services/__pycache__/game_service.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0d435d5a8726af7802118c4f47e4bc437cb6169 Binary files /dev/null and b/RockPaperScissor/services/__pycache__/game_service.cpython-313.pyc differ diff --git a/RockPaperScissor/services/__pycache__/game_service_class.cpython-310.pyc b/RockPaperScissor/services/__pycache__/game_service_class.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4683f13014403bb849f9eaafec211bd88e8331b4 Binary files /dev/null and b/RockPaperScissor/services/__pycache__/game_service_class.cpython-310.pyc differ diff --git a/RockPaperScissor/services/__pycache__/game_service_class.cpython-313.pyc b/RockPaperScissor/services/__pycache__/game_service_class.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fabf424799d12e7b1b926466776ac65c2d88b7da Binary files /dev/null and b/RockPaperScissor/services/__pycache__/game_service_class.cpython-313.pyc differ diff --git a/RockPaperScissor/services/__pycache__/service_instance.cpython-310.pyc 
class Move(Enum):
    """Legal moves, holding the exact strings used across the API."""
    ROCK = "rock"
    PAPER = "paper"
    SCISSORS = "scissors"


class GameResult(Enum):
    """Round outcomes, expressed from the player's perspective."""
    PLAYER_WIN = "player_win"
    AI_WIN = "ai_win"
    DRAW = "draw"


class GameService:
    """Coordinates AI opponents, per-session state, caching and persistence."""

    def __init__(self, storage=None, db_path: str = "data/game_history.db"):
        self.cache = GameSessionCache()
        # Opaque per-(session, ai_type) model state keyed "session__aitype".
        self.model_states: Dict[str, Dict[str, Any]] = {}
        self.ai_models = {
            "random": RandomAI(),
            "adaptive_markov": AdaptiveMarkovAI()
        }
        # Caller may inject any Storage implementation; default to SQLite.
        self.storage = storage or SQLStorage(db_path)

    async def initialize(self):
        """Initialize the backing storage (connection and schema)."""
        await self.storage.initialize()

    async def play_round(self, session_id: str, player_move: str, ai_type: str = "random") -> Dict[str, Any]:
        """Play one round: pick the AI move, score it, persist and cache it.

        Returns a dict with player_move, ai_move, result and stats, or
        {'error': message} if anything failed.
        """
        try:
            state_key = f"{session_id}__{ai_type}"
            model_state = self.model_states.get(state_key)

            # Feed the player's recent history into the Markov model state.
            # (The original also had a no-op `model_state = None` branch here.)
            if ai_type == "adaptive_markov" and model_state is not None:
                model_state["player_second_last_move"] = model_state.get("player_last_move", None)
                model_state["player_last_move"] = player_move

            # Unknown ai_type falls back to the random opponent.
            ai = self.ai_models.get(ai_type, self.ai_models["random"])
            ai_move, updated_state = ai.make_move(model_state)
            self.model_states[state_key] = updated_state

            result = self._determine_winner(player_move, ai_move)

            game_data = {
                'game_id': session_id,
                'player_move': player_move,
                'ai_move': ai_move,
                'result': result,
                'ai_type': ai_type,
                'ai_state': updated_state
            }

            # Persist first, then refresh the in-memory session cache.
            await self.storage.save_game_round(game_data)
            self.cache.update_session(session_id, game_data)

            session_data = self.cache.get_session(session_id)
            stats = self._get_formatted_stats(session_data) if session_data else self._get_empty_stats()
            return {
                "player_move": player_move,
                "ai_move": ai_move,
                "result": result,
                "stats": stats
            }
        except Exception as e:
            import traceback
            print(f"[GameService] Error in play_round: {e}\n{traceback.format_exc()}")
            return {"error": str(e)}

    async def save_session_to_db(self, session_id: str) -> bool:
        """Save the completed session and all of its rounds to storage."""
        session_data = self.cache.get_session(session_id)
        if not session_data:
            print(f"[GameService] No session data found for session_id: {session_id}")
            return False
        try:
            success = await self.storage.save_full_session(session_id, session_data)
            if success:
                print(f"[GameService] Session {session_id} saved to storage.")
            else:
                print(f"[GameService] Failed to save session {session_id} to storage.")
            return success
        except Exception as e:
            import traceback
            print(f"[GameService] Error saving session to storage: {e}\n{traceback.format_exc()}")
            return False

    def _determine_winner(self, player_move: str, ai_move: str) -> str:
        """Score a round; assumes both moves are valid Move values."""
        if player_move == ai_move:
            return GameResult.DRAW.value
        # Maps each move to the move it beats.
        winning_moves = {
            Move.ROCK.value: Move.SCISSORS.value,
            Move.PAPER.value: Move.ROCK.value,
            Move.SCISSORS.value: Move.PAPER.value
        }
        return GameResult.PLAYER_WIN.value if winning_moves[player_move] == ai_move else GameResult.AI_WIN.value

    def _get_empty_stats(self) -> Dict[str, Any]:
        """Zeroed stats payload returned before any round is cached.

        NOTE(review): this flat shape (rock_percent, ...) differs from the
        nested player_moves/ai_moves shape of _get_formatted_stats — confirm
        which shape the frontend actually consumes.
        """
        return {
            "player_wins": 0,
            "ai_wins": 0,
            "draws": 0,
            "total_rounds": 0,
            "player_win_rate": "0.0%",
            "ai_win_rate": "0.0%",
            "rock_percent": "0%",
            "paper_percent": "0%",
            "scissors_percent": "0%"
        }

    @staticmethod
    def _move_percentages(rounds, key: str, total: int) -> Dict[str, str]:
        """Percentage distribution ('NN%') of the given move field over rounds."""
        return {
            move.value: f"{(sum(1 for r in rounds if r[key] == move.value) / total * 100) if total > 0 else 0:.0f}%"
            for move in Move
        }

    def _get_formatted_stats(self, session_data: Dict[str, Any]) -> Dict[str, Any]:
        """Build the display-ready stats payload from cached session data."""
        total = session_data['total_rounds']
        player_wins = session_data['player_wins']
        ai_wins = session_data['ai_wins']
        draws = session_data['draws']
        rounds = session_data['rounds']

        player_win_rate = (player_wins / total * 100) if total > 0 else 0
        ai_win_rate = (ai_wins / total * 100) if total > 0 else 0

        return {
            "player_wins": player_wins,
            "ai_wins": ai_wins,
            "draws": draws,
            "total_rounds": total,
            "player_win_rate": f"{player_win_rate:.1f}%",
            "ai_win_rate": f"{ai_win_rate:.1f}%",
            "player_moves": self._move_percentages(rounds, 'player_move', total),
            "ai_moves": self._move_percentages(rounds, 'ai_move', total)
        }

    async def clear_session(self, session_id: str):
        """Drop the cached session and every AI model state belonging to it."""
        self.cache.remove_session(session_id)
        # BUG FIX: match the full "session__" prefix so clearing "s1" does
        # not also wipe state for a session named "s10".
        prefix = f"{session_id}__"
        for key in [k for k in self.model_states if k.startswith(prefix)]:
            del self.model_states[key]
+game_service = GameService(storage=storage) \ No newline at end of file diff --git a/RockPaperScissor/utils/__init__.py b/RockPaperScissor/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3afc6f4808c5d34b3ecced3561bb18c770e89f71 --- /dev/null +++ b/RockPaperScissor/utils/__init__.py @@ -0,0 +1,6 @@ +# RockPaperScissor/utils/__init__.py +from .logging import setup_logging + +__all__ = [ + 'setup_logging', +] \ No newline at end of file diff --git a/RockPaperScissor/utils/__pycache__/__init__.cpython-310.pyc b/RockPaperScissor/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..719a7d6b81e9a16ae6cf794f05422ef6d77d359a Binary files /dev/null and b/RockPaperScissor/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/RockPaperScissor/utils/__pycache__/__init__.cpython-313.pyc b/RockPaperScissor/utils/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f420f95da7ebc988b3db841c11c6c85ee826fdaa Binary files /dev/null and b/RockPaperScissor/utils/__pycache__/__init__.cpython-313.pyc differ diff --git a/RockPaperScissor/utils/__pycache__/logging.cpython-310.pyc b/RockPaperScissor/utils/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..535afa2fb3cf71fb5715003c0b6c14d960d7cb53 Binary files /dev/null and b/RockPaperScissor/utils/__pycache__/logging.cpython-310.pyc differ diff --git a/RockPaperScissor/utils/__pycache__/logging.cpython-313.pyc b/RockPaperScissor/utils/__pycache__/logging.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9757a626330730767c576dd1cc199457343bd2e3 Binary files /dev/null and b/RockPaperScissor/utils/__pycache__/logging.cpython-313.pyc differ diff --git a/RockPaperScissor/utils/logging.py b/RockPaperScissor/utils/logging.py new file mode 100644 index 
# RockPaperScissor/utils/logging.py
import logging
import sys

# Module-level singleton: setup_logging() is idempotent, so repeated calls
# (e.g. several workers importing the app) never stack duplicate handlers.
_logger_instance = None


def setup_logging(log_level_str: str = "INFO") -> logging.Logger:
    """Create (once) and return the application-wide logger.

    Only the first call configures the logger; later calls return the
    cached instance unchanged and their level argument is ignored.

    Args:
        log_level_str: Name of a stdlib logging level ("DEBUG", "INFO",
            ...); unrecognized names fall back to INFO.

    Returns:
        The configured "RPS_Gradio_App" logger.
    """
    global _logger_instance
    if _logger_instance is None:
        logger = logging.getLogger("RPS_Gradio_App")
        logger.propagate = False  # Prevent Gunicorn/Uvicorn from duplicating root logger's messages

        # Remove existing handlers if any (e.g., from previous calls or other libs)
        if logger.hasHandlers():
            logger.handlers.clear()

        log_level = getattr(logging, log_level_str.upper(), logging.INFO)
        logger.setLevel(log_level)

        handler = logging.StreamHandler(sys.stdout)  # Log to stdout for HF Spaces
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s'
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        _logger_instance = logger
        # Fix: announce configuration through the logger itself rather than a
        # bare print(), so the notice honors the configured format and level.
        logger.info(
            "Logger '%s' configured with level %s and stream handler.",
            logger.name, log_level_str,
        )
    return _logger_instance