amitbhatt6075 committed on
Commit
4f2c694
·
1 Parent(s): c5562b0

fix: re-pushing dynamic config updates

Browse files
Files changed (1) hide show
  1. api/main.py +50 -35
api/main.py CHANGED
@@ -44,7 +44,7 @@ MODELS_DIR = os.path.join(ROOT_DIR, 'models')
44
 
45
  # ✅ FIX: Swapped to a smaller, memory-friendly model to avoid crashing on free tier
46
  MODEL_REPO = "TheBloke/phi-2-GGUF"
47
- MODEL_FILENAME = "phi-2.Q2_K.gguf" # <-- This is a very small, low-quality version that fits in memory
48
 
49
  MODEL_SAVE_DIRECTORY = os.path.join(os.environ.get("WRITABLE_DIR", "/data"), "llm_model")
50
  LLAMA_MODEL_PATH = os.path.join(MODEL_SAVE_DIRECTORY, MODEL_FILENAME)
@@ -71,7 +71,28 @@ _performance_scorer = None
71
  def to_snake(name: str) -> str:
72
  return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()
73
 
74
- # --- Pydantic Models (Single Source of Truth) ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
  class CreativeChatRequest(BaseModel): message: str; history: list; task_context: str
77
  class FinalizeScriptRequest(BaseModel): history: list; task_context: str
@@ -128,11 +149,15 @@ class CaptionAssistResponse(BaseModel): new_text: str
128
  class ForecastRequest(BaseModel):
129
  budget: float; category: str; follower_count: int; engagement_rate: float
130
  config: Optional[Dict[str, str]] = None
131
- class PerformanceForecast(BaseModel): predicted_engagement_rate: float; predicted_reach: int
132
- class PayoutForecastInput(BaseModel):
133
- total_budget_active_campaigns: float
134
- config: Optional[Dict[str, str]] = None
135
- class ForecastResponse(BaseModel): performance: PerformanceForecast; payout: PayoutForecast
 
 
 
 
136
  class InfluencerKpiData(BaseModel): totalReach: int; totalLikes: int; totalComments: int; avgEngagementRate: float; totalSubmissions: int
137
  class InfluencerAnalyticsSummaryResponse(BaseModel): summary: str
138
  class PortfolioOption(BaseModel): id: str; contentUrl: str; caption: Optional[str] = ""; likes: Optional[int] = 0; campaign: dict
@@ -171,25 +196,6 @@ class WeeklyPlanContext(BaseModel): niche: str; current_mood: str; recent_achiev
171
  class WeeklyPlanRequest(BaseModel): context: WeeklyPlanContext
172
  class PlanOption(BaseModel): type: str; title: str; platform: str; contentType: str; instructions: str; reasoning: str
173
  class WeeklyPlanResponse(BaseModel): options: List[PlanOption]
174
- class RequestConfig(BaseModel):
175
- model_name: Optional[str] = "phi-2"
176
- temperature: Optional[float] = 0.7
177
- system_prompt: Optional[str] = None
178
-
179
- class DirectPromptPayload(BaseModel):
180
- prompt: str
181
- config: Optional[RequestConfig] = None
182
-
183
- class PerformanceForecast(BaseModel):
184
- predicted_engagement_rate: float
185
- predicted_reach: int
186
-
187
- class PayoutForecast(BaseModel):
188
- estimated_earning: float
189
-
190
- class ForecastResponse(BaseModel):
191
- performance: PerformanceForecast
192
- payout: PayoutForecast
193
 
194
  # --- FastAPI App ---
195
  app = FastAPI(title="Reachify AI Service (Deploy-Ready)", version="11.0.0")
@@ -705,7 +711,7 @@ def predict_payout(data: PayoutForecastInput):
705
  return {"forecastedAmount": max(0, pred), "commentary": "Based on budget trends."}
706
 
707
 
708
- @app.post("/analyze/content_quality", response_model=ContentQualityResponse, summary="Analyzes a caption for a quality score")
709
  def analyze_content_quality(request: ContentQualityRequest):
710
  """
711
  Uses the loaded LLM to analyze a social media caption based on several criteria
@@ -760,8 +766,14 @@ Respond ONLY with a valid JSON object in the following format:
760
  import json
761
  analysis_result = json.loads(clean_json_text)
762
 
 
 
 
 
 
 
763
  print("--- Successfully received and parsed JSON response from LLM.")
764
- return ContentQualityResponse(**analysis_result)
765
 
766
  except (json.JSONDecodeError, KeyError, ValueError) as e:
767
  print(f"🚨 ERROR parsing LLM response: {e}. Raw response was: {json_text}")
@@ -1234,9 +1246,10 @@ async def rank_by_similarity_endpoint(request: RankBySimilarityRequest):
1234
  @app.post("/analyze/content-quality", response_model=ContentQualityResponse, summary="Analyzes a caption for a quality score")
1235
  def analyze_content_quality(request: ContentQualityRequest):
1236
  """
1237
- Uses the loaded LLM to analyze a social media caption and returns a robustly parsed response.
 
1238
  """
1239
- print(f"\n✅ Received request on /analyze/content-quality")
1240
  if not _llm_instance:
1241
  raise HTTPException(status_code=503, detail="The Llama model is not available.")
1242
 
@@ -1270,12 +1283,14 @@ You are a social media expert. Analyze the following caption... Respond ONLY wit
1270
  clean_json_text = json_text[start_index:end_index]
1271
 
1272
  import json
1273
- analysis_result_raw = json.loads(clean_json_text)
 
 
1274
 
1275
  final_result = {
1276
- "overall_score": analysis_result_raw.get("overall_score"),
1277
- "feedback": analysis_result_raw.get("feedback"),
1278
- "scores": analysis_result_raw.get("scores") or analysis_result_raw.get("score")
1279
  }
1280
 
1281
  print("--- Successfully received and parsed JSON response from LLM.")
@@ -1648,4 +1663,4 @@ def create_campaign_from_prompt_endpoint(payload: DirectPromptPayload):
1648
  )
1649
  return {"response": response_text}
1650
  except Exception as e:
1651
- raise HTTPException(status_code=500, detail=str(e))
 
44
 
45
  # ✅ FIX: Swapped to a smaller, memory-friendly model to avoid crashing on free tier
46
  MODEL_REPO = "TheBloke/phi-2-GGUF"
47
+ MODEL_FILENAME = "phi-2.Q2_K.gguf"
48
 
49
  MODEL_SAVE_DIRECTORY = os.path.join(os.environ.get("WRITABLE_DIR", "/data"), "llm_model")
50
  LLAMA_MODEL_PATH = os.path.join(MODEL_SAVE_DIRECTORY, MODEL_FILENAME)
 
71
  def to_snake(name: str) -> str:
72
  return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()
73
 
74
+ # ==============================================================
75
+ # 🎯 FIX 1: DEFINE NESTED CLASSES FIRST
76
+ # These MUST come before they are used in ForecastResponse.
77
+ # ==============================================================
78
+
79
+ class PerformanceForecast(BaseModel):
80
+ predicted_engagement_rate: float
81
+ predicted_reach: int
82
+
83
+ class PayoutForecast(BaseModel):
84
+ estimated_earning: float
85
+
86
+ class RequestConfig(BaseModel):
87
+ model_name: Optional[str] = "phi-2"
88
+ temperature: Optional[float] = 0.7
89
+ system_prompt: Optional[str] = None
90
+
91
+ class DirectPromptPayload(BaseModel):
92
+ prompt: str
93
+ config: Optional[RequestConfig] = None
94
+
95
+ # --- Other Pydantic Models ---
96
 
97
  class CreativeChatRequest(BaseModel): message: str; history: list; task_context: str
98
  class FinalizeScriptRequest(BaseModel): history: list; task_context: str
 
149
  class ForecastRequest(BaseModel):
150
  budget: float; category: str; follower_count: int; engagement_rate: float
151
  config: Optional[Dict[str, str]] = None
152
+
153
+ # ==========================================================
154
+ # 🎯 FIX 2: NOW DEFINE ForecastResponse
155
+ # PerformanceForecast and PayoutForecast are already defined above.
156
+ # ==========================================================
157
+ class ForecastResponse(BaseModel):
158
+ performance: PerformanceForecast
159
+ payout: PayoutForecast
160
+
161
  class InfluencerKpiData(BaseModel): totalReach: int; totalLikes: int; totalComments: int; avgEngagementRate: float; totalSubmissions: int
162
  class InfluencerAnalyticsSummaryResponse(BaseModel): summary: str
163
  class PortfolioOption(BaseModel): id: str; contentUrl: str; caption: Optional[str] = ""; likes: Optional[int] = 0; campaign: dict
 
196
  class WeeklyPlanRequest(BaseModel): context: WeeklyPlanContext
197
  class PlanOption(BaseModel): type: str; title: str; platform: str; contentType: str; instructions: str; reasoning: str
198
  class WeeklyPlanResponse(BaseModel): options: List[PlanOption]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
199
 
200
  # --- FastAPI App ---
201
  app = FastAPI(title="Reachify AI Service (Deploy-Ready)", version="11.0.0")
 
711
  return {"forecastedAmount": max(0, pred), "commentary": "Based on budget trends."}
712
 
713
 
714
+ @app.post("/analyze/content-quality", response_model=ContentQualityResponse, summary="Analyzes a caption for a quality score")
715
  def analyze_content_quality(request: ContentQualityRequest):
716
  """
717
  Uses the loaded LLM to analyze a social media caption based on several criteria
 
766
  import json
767
  analysis_result = json.loads(clean_json_text)
768
 
769
+ final_result = {
770
+ "overall_score": analysis_result_raw.get("overall_score"),
771
+ "feedback": analysis_result_raw.get("feedback"),
772
+ "scores": analysis_result_raw.get("scores") or analysis_result_raw.get("score")
773
+ }
774
+
775
  print("--- Successfully received and parsed JSON response from LLM.")
776
+ return ContentQualityResponse(**final_result)
777
 
778
  except (json.JSONDecodeError, KeyError, ValueError) as e:
779
  print(f"🚨 ERROR parsing LLM response: {e}. Raw response was: {json_text}")
 
1246
  @app.post("/analyze/content-quality", response_model=ContentQualityResponse, summary="Analyzes a caption for a quality score")
1247
  def analyze_content_quality(request: ContentQualityRequest):
1248
  """
1249
+ Uses the loaded LLM to analyze a social media caption based on several criteria
1250
+ and returns a quantitative score and qualitative feedback.
1251
  """
1252
+ print(f"\n✅ Received request on /analyze/content_quality")
1253
  if not _llm_instance:
1254
  raise HTTPException(status_code=503, detail="The Llama model is not available.")
1255
 
 
1283
  clean_json_text = json_text[start_index:end_index]
1284
 
1285
  import json
1286
+
1287
+ # ✅ FIX: Using consistent variable name 'analysis_result' everywhere
1288
+ analysis_result = json.loads(clean_json_text)
1289
 
1290
  final_result = {
1291
+ "overall_score": analysis_result.get("overall_score"),
1292
+ "feedback": analysis_result.get("feedback"),
1293
+ "scores": analysis_result.get("scores") or analysis_result.get("score")
1294
  }
1295
 
1296
  print("--- Successfully received and parsed JSON response from LLM.")
 
1663
  )
1664
  return {"response": response_text}
1665
  except Exception as e:
1666
+ raise HTTPException(status_code=500, detail=str(e))