File size: 8,531 Bytes
aafc33f
a0f12d3
 
aafc33f
 
 
 
 
 
 
 
 
a0f12d3
aafc33f
 
 
 
 
 
 
a0f12d3
 
aafc33f
 
 
 
 
 
 
 
 
 
a0f12d3
aafc33f
 
 
 
 
 
 
 
 
a0f12d3
 
 
aafc33f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0f12d3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
aafc33f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0f12d3
aafc33f
a0f12d3
 
 
 
 
 
 
 
 
aafc33f
 
 
 
 
 
 
a0f12d3
aafc33f
 
 
a0f12d3
 
 
 
 
aafc33f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0f12d3
 
 
 
 
 
 
 
aafc33f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0f12d3
 
aafc33f
 
 
 
 
 
 
 
 
 
a0f12d3
aafc33f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
"""
FomoFeed - Timing Optimizer AI v2
WITH CLIP SCORE & VIDEO BOOST
"""

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import numpy as np
from datetime import datetime, timedelta
from collections import Counter
import uvicorn

# FastAPI application instance; v2 adds CLIP-quality and video boosts on top of v1 timing analysis.
app = FastAPI(title="FomoFeed Timing Optimizer", version="2.0.0")

class UserEngagementHistory(BaseModel):
    """Request payload: a user's past engagement pattern plus content metadata.

    ``engagement_hours`` and ``engagement_weights`` are parallel lists and
    are consumed pairwise by the optimizer.
    """
    user_id: int
    engagement_hours: list[int]  # List of hours when user got engagement (0-23)
    engagement_weights: list[float]  # Corresponding weights (view=1, like=3, comment=5, save=7)
    content_type: str = "post"  # "post" or "moment"
    timezone_offset: int = 3  # Turkey = +3
    clip_score: float = 0.0  # CLIP visual quality score (0-10)
    has_video: bool = False  # Is this video content?
    
class TimingRecommendation(BaseModel):
    """Response payload: recommended posting hour with confidence and rationale."""
    optimal_hour: int  # 0-23
    confidence: float  # 0-1
    alternative_hours: list[int]  # runner-up hours, best first
    reasoning: dict  # diagnostic details (method, data quality, applied boosts)

def calculate_optimal_time(history: "UserEngagementHistory") -> dict:
    """Recommend the best posting hour from a user's engagement history.

    Pipeline: sum engagement weight per hour, apply general time-of-day
    multipliers, apply optional CLIP-quality and video boosts, then smooth
    the distribution with 30% of each neighboring hour before picking the
    maximum.

    Args:
        history: Per-user engagement data plus content metadata.
            NOTE: ``engagement_hours`` and ``engagement_weights`` are zipped
            pairwise; if their lengths differ, extra entries are silently
            ignored. ``timezone_offset`` is currently unused here.

    Returns:
        dict with keys ``optimal_hour`` (0-23), ``confidence`` (0-1, rounded
        to 2 decimals), ``alternative_hours`` (up to 4 runner-up hours) and
        ``reasoning`` (diagnostics).
    """
    if not history.engagement_hours or not history.engagement_weights:
        # No data: fall back to generic peak hours with low confidence.
        return {
            "optimal_hour": 19,  # 7 PM default
            "confidence": 0.3,
            "alternative_hours": [12, 13, 20, 21],
            "reasoning": {
                "method": "default",
                "note": "Using generic peak hours (no user data)",
                "clip_boost": False,
                "video_boost": False
            }
        }
    
    # Weighted hour distribution: total engagement weight per hour.
    hour_scores: dict[int, float] = {}
    for hour, weight in zip(history.engagement_hours, history.engagement_weights):
        hour_scores[hour] = hour_scores.get(hour, 0) + weight
    
    # General social-media time-of-day multipliers.
    time_bonuses = {
        7: 1.1,   # Morning commute
        8: 1.15,
        12: 1.3,  # Lunch peak
        13: 1.3,
        18: 1.4,  # Evening peak
        19: 1.5,  # Prime time
        20: 1.5,
        21: 1.4,
        22: 1.2
    }
    
    for hour in hour_scores:
        if hour in time_bonuses:
            hour_scores[hour] *= time_bonuses[hour]
    
    # CLIP SCORE BOOST: high-quality visuals perform better in prime time.
    clip_boost_applied = False
    if history.clip_score > 0:
        prime_hours = [12, 13, 18, 19, 20, 21]
        clip_multiplier = 1 + (history.clip_score / 10) * 0.3  # up to +30% boost
        
        for hour in prime_hours:
            if hour in hour_scores:
                hour_scores[hour] *= clip_multiplier
            else:
                # No engagement data for this prime hour: seed it from the CLIP score.
                hour_scores[hour] = history.clip_score * 5 * time_bonuses.get(hour, 1.0)
            clip_boost_applied = True
    
    # VIDEO BOOST: videos perform better in the evening.
    video_boost_applied = False
    if history.has_video:
        evening_hours = [18, 19, 20, 21, 22]
        for hour in evening_hours:
            if hour in hour_scores:
                hour_scores[hour] *= 1.2  # +20% video boost
            else:
                # No data for this evening hour: seed a baseline score.
                hour_scores[hour] = 50 * time_bonuses.get(hour, 1.0)
            video_boost_applied = True
    
    # Smooth: every hour also receives 30% of each raw neighbor's score.
    smoothed_scores = {}
    for hour in range(24):
        score = hour_scores.get(hour, 0)
        prev_hour = (hour - 1) % 24
        next_hour = (hour + 1) % 24
        score += hour_scores.get(prev_hour, 0) * 0.3
        score += hour_scores.get(next_hour, 0) * 0.3
        smoothed_scores[hour] = score
    
    # smoothed_scores always has exactly 24 entries, so max() is well-defined.
    # (Fixed: removed an unreachable "empty distribution" fallback branch.)
    optimal_hour = max(smoothed_scores.items(), key=lambda x: x[1])[0]
    
    # Confidence grows with data volume, capped at 0.95, with small bumps
    # when quality boosts were applied.
    total_engagements = len(history.engagement_hours)
    confidence = min(0.95, 0.5 + (total_engagements / 200))
    if clip_boost_applied and history.clip_score >= 7:
        confidence = min(0.95, confidence + 0.1)
    if video_boost_applied:
        confidence = min(0.95, confidence + 0.05)
    
    # Runner-up hours: best 4 by smoothed score, excluding the optimum.
    # (Fixed: hours with zero score could previously be suggested as
    # alternatives; they are now filtered out.)
    sorted_hours = sorted(smoothed_scores.items(), key=lambda x: x[1], reverse=True)
    alternative_hours = [h for h, s in sorted_hours if h != optimal_hour and s > 0][:4]
    
    reasoning = {
        "method": "weighted_pattern_analysis_v2",
        "total_engagements": total_engagements,
        "unique_hours": len(set(history.engagement_hours)),
        "peak_score": round(smoothed_scores[optimal_hour], 2),
        "data_quality": "good" if total_engagements > 50 else "moderate" if total_engagements > 20 else "limited",
        "clip_boost": clip_boost_applied,
        "clip_score": history.clip_score,
        "video_boost": video_boost_applied,
        "prime_time": optimal_hour in [12, 13, 18, 19, 20, 21]
    }
    
    return {
        "optimal_hour": optimal_hour,
        "confidence": round(confidence, 2),
        "alternative_hours": alternative_hours,
        "reasoning": reasoning
    }

def calculate_next_optimal_times(history: "UserEngagementHistory", count: int = 3) -> list[dict]:
    """Return the top *count* posting opportunities within the next 48 hours.

    Each candidate hour is scored from the user's optimal/alternative hours
    (via ``calculate_optimal_time``) or generic time-of-day scores, with
    extra points for high CLIP scores in prime time and for video content
    in the evening.

    Args:
        history: Per-user engagement data plus content metadata.
        count: Number of opportunities to return.

    Returns:
        Up to ``count`` dicts sorted by descending score, each with keys
        ``datetime`` (ISO string), ``hour``, ``score`` and ``hours_from_now``.
    """
    result = calculate_optimal_time(history)
    optimal_hour = result["optimal_hour"]
    
    now = datetime.now()
    
    # Generic fallback scores for hours outside the personalized set
    # (hoisted out of the loop: it is loop-invariant).
    time_bonuses = {7: 60, 8: 65, 12: 80, 13: 80, 18: 85, 19: 95, 20: 95, 21: 85, 22: 70}
    
    opportunities = []
    
    # Start at +2h so the recommendation is never "post right now".
    # (Fixed: previously iterated range(48) and skipped the first two
    # iterations with a continue; also removed an unused current_hour local.)
    for hours_ahead in range(2, 48):
        future_time = now + timedelta(hours=hours_ahead)
        future_hour = future_time.hour
        
        # Personalized score first, generic time-of-day score otherwise.
        if future_hour == optimal_hour:
            score = 100
        elif future_hour in result["alternative_hours"]:
            score = 80
        else:
            score = time_bonuses.get(future_hour, 40)
        
        # High-quality visuals get a prime-time bump.
        if history.clip_score >= 7 and future_hour in [12, 13, 18, 19, 20, 21]:
            score = min(100, score + 10)
        
        # Video content gets an evening bump.
        if history.has_video and future_hour in [18, 19, 20, 21, 22]:
            score = min(100, score + 5)
        
        opportunities.append({
            "datetime": future_time.isoformat(),
            "hour": future_hour,
            "score": score,
            "hours_from_now": hours_ahead
        })
    
    # Stable sort keeps earlier slots first among equal scores.
    opportunities.sort(key=lambda x: x["score"], reverse=True)
    return opportunities[:count]

@app.get("/")
def root():
    """Service metadata for the API root."""
    info = {
        "service": "FomoFeed Timing Optimizer",
        "status": "active",
        "version": "2.0.0",
    }
    info["features"] = ["clip_boost", "video_boost", "prime_time_optimization"]
    return info

@app.get("/health")
def health():
    """Liveness probe: report service health with the current timestamp."""
    timestamp = datetime.now().isoformat()
    return {"status": "healthy", "timestamp": timestamp}

@app.post("/predict", response_model=TimingRecommendation)
def predict_optimal_time(history: UserEngagementHistory):
    """Predict the optimal posting time from the user's engagement history,
    including CLIP-score and video boosts; any failure maps to HTTP 500."""
    try:
        recommendation = TimingRecommendation(**calculate_optimal_time(history))
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return recommendation

@app.post("/next_opportunities")
def get_next_opportunities(history: UserEngagementHistory, count: int = 3):
    """Return the next *count* best posting opportunities within 48 hours;
    any failure maps to HTTP 500."""
    try:
        payload = {
            "opportunities": calculate_next_optimal_times(history, count),
            "generated_at": datetime.now().isoformat()
        }
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return payload

if __name__ == "__main__":
    # Development entry point: serve the app on all interfaces, port 7860.
    uvicorn.run(app, host="0.0.0.0", port=7860)