kappai committed on
Commit
04fb29c
·
verified ·
1 Parent(s): bacbedf

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -24
app.py CHANGED
@@ -11,7 +11,7 @@ from huggingface_hub import InferenceClient
11
  # CONFIG
12
 
13
  # You can hard-code the model here, or set MODEL_ID as a Space secret.
14
- MODEL_ID = os.environ.get("MODEL_ID")
15
  HF_TOKEN = os.environ.get("HF_TOKEN") # optional for this public model
16
 
17
  REPO_PATH = "/data/questions.json" # where we store generated questions
@@ -397,32 +397,33 @@ def normalize_output(
397
 
398
  def ai_generate(lang: str, category_key: str, variant: str) -> Dict[str, Any]:
399
  prompt = build_prompt(lang, category_key, variant)
400
- raw_text = None
401
  try:
402
  raw_text = model_call(prompt)
403
- except Exception:
404
- raw_text = None
405
-
406
- parsed = try_parse_json(raw_text) if raw_text else None
407
-
408
- if parsed:
409
- normalized = normalize_output(parsed, lang, category_key, variant)
410
- return normalized
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
411
 
412
- # fallback: shuffle fewshots
413
- few = FEWSHOTS[lang][category_key]
414
- q_pool = few["questions"][:]
415
- m_pool = few["micro_actions"][:]
416
- random.shuffle(q_pool)
417
- random.shuffle(m_pool)
418
- return {
419
- "category": category_key,
420
- "language": lang,
421
- "questions": (q_pool + [""] * 4)[:4],
422
- "micro_actions": (m_pool + [""] * 2)[:2],
423
- "tone": "fallback",
424
- "safety_notes": "",
425
- }
426
 
427
 
428
  # ────────────────────────────────────────────────────────────────────────────────
 
11
  # CONFIG
12
 
13
  # You can hard-code the model here, or set MODEL_ID as a Space secret.
14
+ MODEL_ID = os.environ.get("MODEL_ID", "meta-llama/Meta-Llama-3.1-8B-Instruct")
15
  HF_TOKEN = os.environ.get("HF_TOKEN") # optional for this public model
16
 
17
  REPO_PATH = "/data/questions.json" # where we store generated questions
 
397
 
398
  def ai_generate(lang: str, category_key: str, variant: str) -> Dict[str, Any]:
399
  prompt = build_prompt(lang, category_key, variant)
 
400
  try:
401
  raw_text = model_call(prompt)
402
+ parsed = try_parse_json(raw_text) if raw_text else None
403
+
404
+ if parsed:
405
+ return normalize_output(parsed, lang, category_key, variant)
406
+
407
+ # If model replied but JSON is bad, show that in safety_notes
408
+ return {
409
+ "category": category_key,
410
+ "language": lang,
411
+ "questions": FEWSHOTS[lang][category_key]["questions"][:4],
412
+ "micro_actions": FEWSHOTS[lang][category_key]["micro_actions"][:2],
413
+ "tone": "fallback",
414
+ "safety_notes": f"Model replied but JSON parsing failed. raw_text starts with: {repr(raw_text[:120])}",
415
+ }
416
+ except Exception as e:
417
+ # Surface the error
418
+ return {
419
+ "category": category_key,
420
+ "language": lang,
421
+ "questions": FEWSHOTS[lang][category_key]["questions"][:4],
422
+ "micro_actions": FEWSHOTS[lang][category_key]["micro_actions"][:2],
423
+ "tone": "error",
424
+ "safety_notes": f"Model call error: {type(e).__name__}: {e}",
425
+ }
426
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
427
 
428
 
429
  # ────────────────────────────────────────────────────────────────────────────────