Spaces:
Paused
Paused
Paul committed on
Commit ·
bab1caf
1
Parent(s): f194e28
add new logic
Browse files- README.md +1 -1
- app.py +1 -0
- finetuned_reply_service.py +16 -6
README.md
CHANGED
|
@@ -21,7 +21,7 @@ An AI-powered system that generates contextually appropriate replies for convers
|
|
| 21 |
- **Model 1 – LLaMA**: Trigger+move augmented dataset fine-tunes a LLaMA-style LLM to respond like a confident "anh"
|
| 22 |
- **Model 2 – PhoBERT**: Same pipeline but specialized for the Vietnamese `vinai/phobert-base` backbone (encoder-decoder LoRA)
|
| 23 |
- **Model 3 – Wingman LoRA**: Dedicated LoRA head (wingman persona) that still falls back to the prompt-based service if adapters are missing
|
| 24 |
-
- **Wingman Prompt Editor**: The UI shows the current Model 3 prompt and lets you override it live for rapid experimentation
|
| 25 |
- **Automatic Fine-tuning**: All artifacts can be trained automatically on Spaces using `setup_and_finetune.py`
|
| 26 |
|
| 27 |
## 📋 How It Works
|
|
|
|
| 21 |
- **Model 1 – LLaMA**: Trigger+move augmented dataset fine-tunes a LLaMA-style LLM to respond like a confident "anh"
|
| 22 |
- **Model 2 – PhoBERT**: Same pipeline but specialized for the Vietnamese `vinai/phobert-base` backbone (encoder-decoder LoRA)
|
| 23 |
- **Model 3 – Wingman LoRA**: Dedicated LoRA head (wingman persona) that still falls back to the prompt-based service if adapters are missing
|
| 24 |
+
- **Wingman Prompt Editor**: The UI shows the current Model 3 prompt and lets you override it live (supports `{conversation}`, `{trigger}`, `{move}` placeholders) for rapid experimentation
|
| 25 |
- **Automatic Fine-tuning**: All artifacts can be trained automatically on Spaces using `setup_and_finetune.py`
|
| 26 |
|
| 27 |
## 📋 How It Works
|
app.py
CHANGED
|
@@ -282,6 +282,7 @@ with gr.Blocks(title=title) as demo:
|
|
| 282 |
label="Custom Wingman Prompt",
|
| 283 |
value=DEFAULT_WINGMAN_PROMPT,
|
| 284 |
placeholder="Adjust how the wingman persona should respond...",
|
|
|
|
| 285 |
)
|
| 286 |
gr.Markdown("Leave as-is for default behavior. Edits apply to Model 3 when its LoRA is used.")
|
| 287 |
reply_btn = gr.Button("Generate Reply Suggestion", variant="primary", size="lg")
|
|
|
|
| 282 |
label="Custom Wingman Prompt",
|
| 283 |
value=DEFAULT_WINGMAN_PROMPT,
|
| 284 |
placeholder="Adjust how the wingman persona should respond...",
|
| 285 |
+
info="You can reference {conversation}, {trigger}, {move} inside your prompt.",
|
| 286 |
)
|
| 287 |
gr.Markdown("Leave as-is for default behavior. Edits apply to Model 3 when its LoRA is used.")
|
| 288 |
reply_btn = gr.Button("Generate Reply Suggestion", variant="primary", size="lg")
|
finetuned_reply_service.py
CHANGED
|
@@ -232,12 +232,22 @@ class FinetunedReplyService:
|
|
| 232 |
]
|
| 233 |
|
| 234 |
if self.persona == "wingman":
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 241 |
else:
|
| 242 |
base_prompt.append(
|
| 243 |
"Generate the next appropriate response from Male to Female. The reply should be from Male's perspective, "
|
|
|
|
| 232 |
]
|
| 233 |
|
| 234 |
if self.persona == "wingman":
|
| 235 |
+
if custom_prompt:
|
| 236 |
+
try:
|
| 237 |
+
persona_text = custom_prompt.format(
|
| 238 |
+
conversation=conversation,
|
| 239 |
+
trigger=trigger,
|
| 240 |
+
move=move,
|
| 241 |
+
)
|
| 242 |
+
except Exception:
|
| 243 |
+
persona_text = custom_prompt
|
| 244 |
+
else:
|
| 245 |
+
persona_text = (
|
| 246 |
+
"You are a charming Vietnamese wingman crafting irresistible yet respectful replies for Male. "
|
| 247 |
+
"Keep replies concise (<35 words), confident, playful, and always from Male's perspective using 'anh' for self and 'em' for partner. "
|
| 248 |
+
"Embed subtle compliments/flirtation while honoring the identified move."
|
| 249 |
+
)
|
| 250 |
+
base_prompt.append(persona_text)
|
| 251 |
else:
|
| 252 |
base_prompt.append(
|
| 253 |
"Generate the next appropriate response from Male to Female. The reply should be from Male's perspective, "
|