# Hugging Face Space: Emotion Annotator (LLaMA 3.1 via the InferenceClient chat API)
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Hugging Face Inference client. Reads the access token from the HF_TOKEN
# environment variable (a Space secret); raises KeyError at startup if unset,
# which is the desired fail-fast behavior.
client = InferenceClient(
    provider="nscale",  # You can change to 'openrouter' or 'novita' if needed
    api_key=os.environ["HF_TOKEN"],
)
# Example sentences with mixed/ambivalent emotions, used to pre-fill the UI.
preset_prompts = [
    "I finally got the promotion, but I feel guilty because my best friend got laid off.",
    "Moving to a new city is exciting, but leaving my family breaks my heart.",
    "I passed the test, but my friend failed — and I don’t know how to feel.",
    "They applauded me on stage, but all I could think about was how lonely I felt.",
    "I’m happy for her, but I wish I had that too.",
]
# Core generation logic using chat completion
def call_llama(messages):
    """Send *messages* (OpenAI-style chat list) to LLaMA 3.1 and return the reply.

    Returns the stripped assistant message content, or a user-visible
    "⚠️ Error: ..." string if the API call fails for any reason.
    """
    try:
        completion = client.chat.completions.create(
            model="meta-llama/Llama-3.1-8B-Instruct",
            messages=messages,
        )
        return completion.choices[0].message.content.strip()
    except Exception as e:
        # Broad by design: any API/network failure is rendered in the UI
        # instead of crashing the Gradio handler.
        return f"⚠️ Error: {str(e)}"
# Emotion pipeline
def emotion_annotator(text):
    """Two-step LLM pipeline over an input sentence.

    Step 1 asks the model for all candidate emotions (comma-separated);
    step 2 asks it to pick the dominant one with a brief reason.
    Returns a (candidates, final) tuple of model-response strings.
    """
    # Step 1: List candidate emotions
    msg1 = [
        {
            "role": "user",
            "content": f'List all possible emotions the person might be feeling in this sentence:\n"{text}"\nJust give comma-separated emotion names.'
        }
    ]
    candidates = call_llama(msg1)
    # Step 2: Choose most likely emotion with reason
    msg2 = [
        {
            "role": "user",
            "content": f'From these emotions: {candidates}, which is most likely the dominant one in the sentence "{text}"? Explain why briefly.\nFormat:\nMost likely emotion: <emotion>\nReason: <why>'
        }
    ]
    final = call_llama(msg2)
    return candidates, final
# Gradio UI: input row (textbox + example dropdown), submit button, output row.
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 Emotion Annotator (LLaMA 3.1 via Hugging Face Chat API)")
    gr.Markdown("Powered by `meta-llama/Llama-3.1-8B-Instruct`, served using the InferenceClient chat interface.")
    with gr.Row():
        text_input = gr.Textbox(label="✏️ Input Sentence", placeholder="e.g., I’m proud but I feel like I let them down.")
        dropdown = gr.Dropdown(preset_prompts, label="💬 Choose an example")
    run_button = gr.Button("Submit")
    with gr.Row():
        candidate_output = gr.Textbox(label="🧠 Candidate Emotions")
        final_output = gr.Textbox(label="🎯 Most Likely Emotion + Explanation")
    # Dropdown autofill: copy the selected example verbatim into the textbox.
    dropdown.change(fn=lambda x: x, inputs=dropdown, outputs=text_input)
    run_button.click(fn=emotion_annotator, inputs=text_input, outputs=[candidate_output, final_output])

demo.launch()