"""Trading environment that augments the visual environment's rewards with market-sentiment analysis."""
from typing import Any, Dict

import numpy as np

from src.sentiment.twitter_analyzer import AdvancedSentimentAnalyzer
from .visual_trading_env import VisualTradingEnvironment
class AdvancedTradingEnvironment(VisualTradingEnvironment):
    """Visual trading environment whose rewards are shaped by market sentiment.

    Every few steps the environment polls an external sentiment analyzer and
    scales the base reward up or down depending on whether the chosen action
    agrees with the prevailing (bullish / bearish) sentiment regime.
    """

    def __init__(self, initial_balance=10000, risk_level="Medium", asset_type="Crypto",
                 use_sentiment=True, sentiment_influence=0.3):
        """Initialize the environment.

        Args:
            initial_balance: Starting account balance (passed to the parent env).
            risk_level: Risk profile string (passed to the parent env).
            asset_type: Asset class string (passed to the parent env).
            use_sentiment: When True, load the sentiment analyzer and apply
                sentiment-based reward shaping.
            sentiment_influence: Weight (0..1) controlling how strongly
                sentiment modifies the base reward.
        """
        super().__init__(initial_balance, risk_level, asset_type)
        self.use_sentiment = use_sentiment
        self.sentiment_influence = sentiment_influence  # how much sentiment affects decisions
        self.sentiment_history = []  # rolling window of recent sentiment scores
        self.sentiment_window = 20   # max number of samples retained in the window
        # Neutral defaults set unconditionally: step() and reset() read these
        # attributes even when sentiment shaping is disabled.
        self.current_sentiment = 0.5
        self.sentiment_confidence = 0.0
        if use_sentiment:
            self.sentiment_analyzer = AdvancedSentimentAnalyzer()
            self.sentiment_analyzer.initialize_models()

    def step(self, action):
        """Execute one trading step with sentiment-shaped reward.

        Follows the parent environment's ``(observation, reward, done, info)``
        contract; ``info`` gains ``'sentiment'``, ``'sentiment_confidence'``
        and ``'sentiment_influence'`` keys.
        """
        # Refresh sentiment only every 5 steps to limit analyzer calls.
        if self.use_sentiment and self.current_step % 5 == 0:
            self._update_sentiment()
        # Execute the original step.
        observation, reward, done, info = super().step(action)
        # Enhance the reward with sentiment analysis.
        if self.use_sentiment:
            reward = self._apply_sentiment_to_reward(reward, action, info)
        # Add sentiment data to the observation and info dict.
        enhanced_observation = self._enhance_observation(observation)
        info['sentiment'] = self.current_sentiment
        info['sentiment_confidence'] = self.sentiment_confidence
        info['sentiment_influence'] = self.sentiment_influence
        return enhanced_observation, reward, done, info

    def _update_sentiment(self):
        """Poll the analyzer and refresh the rolling sentiment window.

        Falls back to a neutral reading (0.5 sentiment, 0.0 confidence) if the
        analyzer raises, so a sentiment-source outage never aborts training.
        """
        try:
            sentiment_data = self.sentiment_analyzer.get_influencer_sentiment()
            self.current_sentiment = sentiment_data['market_sentiment']
            self.sentiment_confidence = sentiment_data['confidence']
            # Maintain a bounded history for trend estimation.
            self.sentiment_history.append(self.current_sentiment)
            if len(self.sentiment_history) > self.sentiment_window:
                self.sentiment_history.pop(0)
        except Exception as e:
            print(f"Error updating sentiment: {e}")
            self.current_sentiment = 0.5
            self.sentiment_confidence = 0.0

    def _apply_sentiment_to_reward(self, original_reward: float, action: int, info: Dict) -> float:
        """Scale ``original_reward`` by how well ``action`` agrees with sentiment.

        Action encoding assumed from the parent env: 1=buy, 2=sell, 3=close —
        TODO confirm against the parent environment's action space.
        The shaped reward is clipped to at most 3x the base magnitude.
        """
        # Below this confidence the signal is too noisy to act on.
        if self.sentiment_confidence < 0.3:
            return original_reward
        sentiment_multiplier = 1.0
        if self.current_sentiment > 0.6:  # bullish regime
            if action == 1:  # buying aligns with sentiment
                sentiment_multiplier += self.sentiment_influence * self.sentiment_confidence
            elif action == 2:  # sell (increase position): partial credit
                sentiment_multiplier += self.sentiment_influence * 0.5 * self.sentiment_confidence
            elif action == 3:  # closing may forfeit upside
                sentiment_multiplier -= self.sentiment_influence * 0.3 * self.sentiment_confidence
        elif self.current_sentiment < 0.4:  # bearish regime
            if action == 3:  # closing protects against downside
                sentiment_multiplier += self.sentiment_influence * self.sentiment_confidence
            elif action == 1:  # buying into weakness is penalized
                sentiment_multiplier -= self.sentiment_influence * 0.5 * self.sentiment_confidence
        # Momentum term: compare the last 5 samples against the 5 before them.
        if len(self.sentiment_history) > 5:
            recent_trend = np.mean(self.sentiment_history[-5:]) - np.mean(self.sentiment_history[-10:-5])
            sentiment_multiplier += recent_trend * self.sentiment_influence * 0.5
        enhanced_reward = original_reward * sentiment_multiplier
        # Ensure the shaped reward doesn't become too extreme.
        max_reward = abs(original_reward) * 3
        return float(np.clip(enhanced_reward, -max_reward, max_reward))

    def _enhance_observation(self, original_observation):
        """Return the observation, optionally enriched with sentiment data.

        Currently a pass-through: the visual observation is kept unchanged.
        A more advanced implementation could encode sentiment in the image.
        """
        return original_observation

    def get_sentiment_analysis(self) -> Dict:
        """Return a summary of the current sentiment state for reporting."""
        if not self.use_sentiment:
            return {"error": "Sentiment analysis disabled"}
        return {
            "current_sentiment": self.current_sentiment,
            "sentiment_confidence": self.sentiment_confidence,
            "sentiment_trend": self._calculate_sentiment_trend(),
            "influence_level": self.sentiment_influence,
            "history_length": len(self.sentiment_history),
        }

    def _calculate_sentiment_trend(self) -> str:
        """Classify the recent sentiment direction.

        Returns one of ``"neutral"`` (too little history), ``"improving"``,
        ``"deteriorating"``, or ``"stable"`` based on a 0.1 dead-band between
        the mean of the last 5 samples and the 5 before them.
        """
        if len(self.sentiment_history) < 5:
            return "neutral"
        recent = np.mean(self.sentiment_history[-5:])
        previous = np.mean(self.sentiment_history[-10:-5]) if len(self.sentiment_history) >= 10 else recent
        if recent > previous + 0.1:
            return "improving"
        elif recent < previous - 0.1:
            return "deteriorating"
        else:
            return "stable"

    def reset(self):
        """Reset the environment and clear all sentiment state."""
        observation = super().reset()
        self.sentiment_history = []
        self.current_sentiment = 0.5
        self.sentiment_confidence = 0.0
        return observation