safiaa02 commited on
Commit
60d74a3
·
verified ·
1 Parent(s): e54c907

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -33
app.py CHANGED
@@ -1,13 +1,14 @@
1
  import os
2
  import gradio as gr
3
- import requests
4
 
5
- # Hugging Face Chat Completion Endpoint
6
- API_URL = "https://router.huggingface.co/novita/v3/openai/chat/completions"
7
- HEADERS = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}
8
- MODEL = "mistralai/Mistral-7B-Instruct-v0.1"
 
9
 
10
- # Suggested test prompts
11
  preset_prompts = [
12
  "I finally got the promotion, but I feel guilty because my best friend got laid off.",
13
  "Moving to a new city is exciting, but leaving my family breaks my heart.",
@@ -16,43 +17,46 @@ preset_prompts = [
16
  "I’m happy for her, but I wish I had that too.",
17
  ]
18
 
19
- # Call the Hugging Face router endpoint with Mistral
20
- def call_mistral_chat(messages):
21
- payload = {
22
- "model": MODEL,
23
- "messages": messages
24
- }
25
-
26
  try:
27
- response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=45)
28
- response.raise_for_status()
29
- data = response.json()
30
-
31
- return data["choices"][0]["message"]["content"]
32
  except Exception as e:
33
  return f"⚠️ Error: {str(e)}"
34
 
35
- # Emotion annotation logic
36
- def emotion_annotator(user_text):
37
- # Step 1: Generate candidate emotions
38
- prompt1 = f"You are an emotion analysis expert. List all possible emotions the person might be feeling in this sentence:\n\n\"{user_text}\"\n\nAnswer with just the emotion names."
39
- messages1 = [{"role": "user", "content": prompt1}]
40
- candidate_emotions = call_mistral_chat(messages1)
 
 
 
 
41
 
42
- # Step 2: Disambiguate to most likely emotion
43
- prompt2 = f"""Now from this list of emotions: {candidate_emotions}, pick the most likely one the person is feeling and explain why. Sentence: "{user_text}"\n\nFormat:\nMost likely emotion: <emotion>\nReason: <why>"""
44
- messages2 = [{"role": "user", "content": prompt2}]
45
- final_emotion = call_mistral_chat(messages2)
 
 
 
 
46
 
47
- return candidate_emotions.strip(), final_emotion.strip()
48
 
49
  # Gradio UI
50
  with gr.Blocks() as demo:
51
- gr.Markdown("## 🧠 Emotion Annotator AI (Powered by Mistral 7B via HF Chat API)")
52
- gr.Markdown("Disambiguates complex emotions using `mistralai/Mistral-7B-Instruct-v0.1` through Hugging Face's Chat Completion endpoint.")
53
 
54
  with gr.Row():
55
- text_input = gr.Textbox(label="📝 Input Sentence", placeholder="e.g., I’m proud but I feel like I let them down.", lines=2)
56
  dropdown = gr.Dropdown(preset_prompts, label="💬 Choose an example")
57
  run_button = gr.Button("Submit")
58
 
@@ -60,7 +64,7 @@ with gr.Blocks() as demo:
60
  candidate_output = gr.Textbox(label="🧠 Candidate Emotions")
61
  final_output = gr.Textbox(label="🎯 Most Likely Emotion + Explanation")
62
 
63
- # Auto-fill textbox from dropdown
64
  dropdown.change(fn=lambda x: x, inputs=dropdown, outputs=text_input)
65
  run_button.click(fn=emotion_annotator, inputs=text_input, outputs=[candidate_output, final_output])
66
 
 
1
  import os
2
  import gradio as gr
3
+ from huggingface_hub import InferenceClient
4
 
5
# Load the Hugging Face token from the HF_TOKEN secret and fail fast with a
# clear message when it is missing — plain os.environ["HF_TOKEN"] would raise
# a bare KeyError at import time, which is hard to diagnose in a Space log.
hf_token = os.environ.get("HF_TOKEN")
if not hf_token:
    raise RuntimeError(
        "HF_TOKEN environment variable is not set. "
        "Add it as a secret so the InferenceClient can authenticate."
    )

# Chat-completion client routed through an inference provider.
client = InferenceClient(
    provider="nscale",  # You can change to 'openrouter' or 'novita' if needed
    api_key=hf_token,
)
10
 
11
+ # Test prompt list
12
  preset_prompts = [
13
  "I finally got the promotion, but I feel guilty because my best friend got laid off.",
14
  "Moving to a new city is exciting, but leaving my family breaks my heart.",
 
17
  "I’m happy for her, but I wish I had that too.",
18
  ]
19
 
20
+ # Core generation logic using chat completion
21
def call_llama(messages):
    """Run one chat-completion round against Llama-3.1-8B-Instruct.

    Args:
        messages: list of chat messages, each ``{"role": ..., "content": ...}``.

    Returns:
        The model reply stripped of surrounding whitespace, or a
        user-visible "⚠️ Error: ..." string if the API call fails.
    """
    try:
        result = client.chat.completions.create(
            model="meta-llama/Llama-3.1-8B-Instruct",
            messages=messages,
        )
        reply_text = result.choices[0].message.content
        return reply_text.strip()
    except Exception as err:
        # Best-effort: surface any API failure as text so the UI never crashes.
        return f"⚠️ Error: {str(err)}"
30
 
31
+ # Emotion pipeline
32
def emotion_annotator(text):
    """Annotate the emotion expressed in *text* via a two-step LLM pipeline.

    Step 1 asks the model to enumerate every plausible emotion; step 2 asks
    it to pick the dominant one from that list and justify the choice.

    Args:
        text: the sentence to analyze.

    Returns:
        A ``(candidate_emotions, final_verdict)`` tuple of strings.
    """
    # Step 1: enumerate candidate emotions.
    listing_prompt = f'List all possible emotions the person might be feeling in this sentence:\n"{text}"\nJust give comma-separated emotion names.'
    candidates = call_llama([{"role": "user", "content": listing_prompt}])

    # Step 2: disambiguate — pick the dominant emotion and explain briefly.
    verdict_prompt = f'From these emotions: {candidates}, which is most likely the dominant one in the sentence "{text}"? Explain why briefly.\nFormat:\nMost likely emotion: <emotion>\nReason: <why>'
    verdict = call_llama([{"role": "user", "content": verdict_prompt}])

    return candidates, verdict
52
 
53
  # Gradio UI
54
  with gr.Blocks() as demo:
55
+ gr.Markdown("## 🧠 Emotion Annotator (LLaMA 3.1 via Hugging Face Chat API)")
56
+ gr.Markdown("Powered by `meta-llama/Llama-3.1-8B-Instruct`, served using the InferenceClient chat interface.")
57
 
58
  with gr.Row():
59
+ text_input = gr.Textbox(label="✏️ Input Sentence", placeholder="e.g., I’m proud but I feel like I let them down.")
60
  dropdown = gr.Dropdown(preset_prompts, label="💬 Choose an example")
61
  run_button = gr.Button("Submit")
62
 
 
64
  candidate_output = gr.Textbox(label="🧠 Candidate Emotions")
65
  final_output = gr.Textbox(label="🎯 Most Likely Emotion + Explanation")
66
 
67
+ # Dropdown autofill
68
  dropdown.change(fn=lambda x: x, inputs=dropdown, outputs=text_input)
69
  run_button.click(fn=emotion_annotator, inputs=text_input, outputs=[candidate_output, final_output])
70