Spaces:
Running
Running
"""
FomoFeed - Timing Optimizer AI v2
WITH CLIP SCORE & VIDEO BOOST
"""
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import numpy as np  # NOTE(review): unused in this file — confirm before removing
from datetime import datetime, timedelta
from collections import Counter  # NOTE(review): unused in this file — confirm before removing
import uvicorn

app = FastAPI(title="FomoFeed Timing Optimizer", version="2.0.0")
class UserEngagementHistory(BaseModel):
    """Request payload: one user's engagement history plus content signals."""
    user_id: int  # owner of this engagement history
    engagement_hours: list[int]  # List of hours when user got engagement (0-23)
    engagement_weights: list[float]  # Corresponding weights (view=1, like=3, comment=5, save=7)
    content_type: str = "post"  # "post" or "moment"
    timezone_offset: int = 3  # Turkey = +3; NOTE(review): never consumed by the optimizer code visible here — confirm
    clip_score: float = 0.0  # CLIP visual quality score (0-10)
    has_video: bool = False  # Is this video content?
class TimingRecommendation(BaseModel):
    """Response model: recommended posting hour plus supporting context."""
    optimal_hour: int  # best hour of day to post (0-23)
    confidence: float  # confidence in the recommendation (0-1)
    alternative_hours: list[int]  # next-best hours, ranked by score
    reasoning: dict  # free-form diagnostics explaining the recommendation
# General social-media engagement multipliers per hour of day.
_TIME_BONUSES = {
    7: 1.1,   # Morning commute
    8: 1.15,
    12: 1.3,  # Lunch peak
    13: 1.3,
    18: 1.4,  # Evening peak
    19: 1.5,  # Prime time
    20: 1.5,
    21: 1.4,
    22: 1.2
}

# Hours where high-quality (high CLIP score) visuals perform best.
_PRIME_HOURS = [12, 13, 18, 19, 20, 21]

# Hours where video content performs best.
_EVENING_HOURS = [18, 19, 20, 21, 22]


def _weighted_hour_scores(hours: list[int], weights: list[float]) -> dict:
    """Sum engagement weight per hour, then apply the time-of-day bonuses."""
    scores = {}
    for hour, weight in zip(hours, weights):
        scores[hour] = scores.get(hour, 0) + weight
    for hour in scores:
        if hour in _TIME_BONUSES:
            scores[hour] *= _TIME_BONUSES[hour]
    return scores


def _apply_clip_boost(scores: dict, clip_score: float) -> bool:
    """Boost prime-time hours for visually high-quality content.

    Prime hours the user already has data for are multiplied (up to +30%
    at clip_score == 10); prime hours without data are seeded with a
    CLIP-derived baseline. Returns True when any boost was applied.
    """
    if clip_score <= 0:
        return False
    multiplier = 1 + (clip_score / 10) * 0.3  # max +30% boost
    for hour in _PRIME_HOURS:
        if hour in scores:
            scores[hour] *= multiplier
        else:
            # Seed hours with no engagement data using the CLIP score.
            scores[hour] = clip_score * 5 * _TIME_BONUSES.get(hour, 1.0)
    return True


def _apply_video_boost(scores: dict, has_video: bool) -> bool:
    """Boost evening hours for video content; returns True when applied."""
    if not has_video:
        return False
    for hour in _EVENING_HOURS:
        if hour in scores:
            scores[hour] *= 1.2  # +20% video boost
        else:
            scores[hour] = 50 * _TIME_BONUSES.get(hour, 1.0)  # seed unseen hours
    return True


def _smooth_scores(scores: dict) -> dict:
    """Spread 30% of each neighboring hour's score in (wraps at midnight)."""
    return {
        hour: scores.get(hour, 0)
        + scores.get((hour - 1) % 24, 0) * 0.3
        + scores.get((hour + 1) % 24, 0) * 0.3
        for hour in range(24)
    }


def calculate_optimal_time(history: UserEngagementHistory) -> dict:
    """
    Analyze a user's engagement pattern and recommend the best posting hour,
    factoring in CLIP visual-quality and video boosts.

    Returns a dict with keys: optimal_hour (0-23), confidence (0-1),
    alternative_hours (up to 4 runner-up hours) and reasoning (diagnostics).

    Raises:
        ValueError: if engagement_hours and engagement_weights differ in length.
    """
    if not history.engagement_hours or not history.engagement_weights:
        # No data — fall back to generic peak hours.
        return {
            "optimal_hour": 19,  # 7 PM default
            "confidence": 0.3,
            "alternative_hours": [12, 13, 20, 21],
            "reasoning": {
                "method": "default",
                "note": "Using generic peak hours (no user data)",
                "clip_boost": False,
                "video_boost": False
            }
        }

    # zip() would silently truncate mismatched lists and skew the analysis;
    # fail loudly instead so malformed payloads are caught by the caller.
    if len(history.engagement_hours) != len(history.engagement_weights):
        raise ValueError(
            "engagement_hours and engagement_weights must have the same length"
        )

    hour_scores = _weighted_hour_scores(
        history.engagement_hours, history.engagement_weights
    )
    clip_boost_applied = _apply_clip_boost(hour_scores, history.clip_score)
    video_boost_applied = _apply_video_boost(hour_scores, history.has_video)
    smoothed_scores = _smooth_scores(hour_scores)

    # Argmax over all 24 hours; ties resolve to the earliest hour.
    optimal_hour = max(smoothed_scores.items(), key=lambda item: item[1])[0]

    # Confidence grows with the amount of data, capped at 0.95, with small
    # extra credit when quality/video boosts were applied.
    total_engagements = len(history.engagement_hours)
    confidence = min(0.95, 0.5 + (total_engagements / 200))
    if clip_boost_applied and history.clip_score >= 7:
        confidence = min(0.95, confidence + 0.1)
    if video_boost_applied:
        confidence = min(0.95, confidence + 0.05)

    # Up to 4 runner-up hours (index 0 of the sorted list is the optimum).
    sorted_hours = sorted(smoothed_scores.items(), key=lambda item: item[1], reverse=True)
    alternative_hours = [h for h, _ in sorted_hours[1:5] if h != optimal_hour]

    reasoning = {
        "method": "weighted_pattern_analysis_v2",
        "total_engagements": total_engagements,
        "unique_hours": len(set(history.engagement_hours)),
        "peak_score": round(smoothed_scores[optimal_hour], 2),
        "data_quality": "good" if total_engagements > 50 else "moderate" if total_engagements > 20 else "limited",
        "clip_boost": clip_boost_applied,
        "clip_score": history.clip_score,
        "video_boost": video_boost_applied,
        "prime_time": optimal_hour in [12, 13, 18, 19, 20, 21]
    }
    return {
        "optimal_hour": optimal_hour,
        "confidence": round(confidence, 2),
        "alternative_hours": alternative_hours[:4],
        "reasoning": reasoning
    }
def calculate_next_optimal_times(history: UserEngagementHistory, count: int = 3) -> list[dict]:
    """
    Calculate the next `count` optimal posting times within the next 48 hours.

    Scores every hourly slot from 2 to 47 hours ahead (slots under 2 hours
    away are considered too soon) and returns the top `count` slots, highest
    score first. Each slot dict carries: datetime (ISO string), hour (0-23),
    score (0-100) and hours_from_now.
    """
    result = calculate_optimal_time(history)
    optimal_hour = result["optimal_hour"]
    alternative_hours = set(result["alternative_hours"])

    # Fallback score per hour of day for slots that are neither the optimal
    # nor an alternative hour (hoisted out of the loop — it is invariant).
    fallback_scores = {7: 60, 8: 65, 12: 80, 13: 80, 18: 85, 19: 95, 20: 95, 21: 85, 22: 70}

    # NOTE(review): naive local server time; history.timezone_offset is never
    # applied here — confirm the intended timezone handling.
    now = datetime.now()
    opportunities = []
    for hours_ahead in range(2, 48):  # slots < 2 hours from now are skipped
        future_time = now + timedelta(hours=hours_ahead)
        future_hour = future_time.hour
        if future_hour == optimal_hour:
            score = 100
        elif future_hour in alternative_hours:
            score = 80
        else:
            score = fallback_scores.get(future_hour, 40)
        # High-quality visuals (CLIP >= 7) earn a prime-time bonus.
        if history.clip_score >= 7 and future_hour in [12, 13, 18, 19, 20, 21]:
            score = min(100, score + 10)
        # Video content earns an evening bonus.
        if history.has_video and future_hour in [18, 19, 20, 21, 22]:
            score = min(100, score + 5)
        opportunities.append({
            "datetime": future_time.isoformat(),
            "hour": future_hour,
            "score": score,
            "hours_from_now": hours_ahead
        })
    # Highest score first; the sort is stable, so earlier slots win ties.
    opportunities.sort(key=lambda item: item["score"], reverse=True)
    return opportunities[:count]
def root():
    """Return service identification metadata.

    NOTE(review): no @app.get decorator is visible in this file — confirm
    the route registration was not lost.
    """
    feature_list = ["clip_boost", "video_boost", "prime_time_optimization"]
    payload = {
        "service": "FomoFeed Timing Optimizer",
        "status": "active",
        "version": "2.0.0",
        "features": feature_list
    }
    return payload
def health():
    """Liveness probe: report healthy plus the current server timestamp.

    NOTE(review): no @app.get decorator is visible in this file — confirm
    the route registration was not lost.
    """
    stamp = datetime.now().isoformat()
    return {"status": "healthy", "timestamp": stamp}
def predict_optimal_time(history: UserEngagementHistory):
    """
    Predict the optimal posting time from the user's engagement history
    (CLIP-score and video boosts included).

    Any failure is surfaced as an HTTP 500 carrying the error text.
    NOTE(review): no @app.post decorator is visible here — confirm the
    route registration was not lost.
    """
    try:
        recommendation = calculate_optimal_time(history)
        return TimingRecommendation(**recommendation)
    except Exception as error:  # broad on purpose: map any failure to HTTP 500
        raise HTTPException(status_code=500, detail=str(error))
def get_next_opportunities(history: UserEngagementHistory, count: int = 3):
    """
    Return the next `count` optimal posting opportunities within 48 hours,
    together with the generation timestamp.

    Any failure is surfaced as an HTTP 500 carrying the error text.
    NOTE(review): no @app.post decorator is visible here — confirm the
    route registration was not lost.
    """
    try:
        payload = {
            "opportunities": calculate_next_optimal_times(history, count),
            "generated_at": datetime.now().isoformat()
        }
    except Exception as error:  # broad on purpose: map any failure to HTTP 500
        raise HTTPException(status_code=500, detail=str(error))
    return payload
if __name__ == "__main__":
    # Bind on all interfaces; port 7860 — presumably the Hugging Face
    # Spaces default (the file header shows Spaces viewer residue) — confirm.
    uvicorn.run(app, host="0.0.0.0", port=7860)