DronA23 committed on
Commit
2c20e27
·
verified ·
1 Parent(s): 3def293

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -8
app.py CHANGED
@@ -1,16 +1,25 @@
1
  import streamlit as st
2
  import pandas as pd
 
3
  from huggingface_hub import InferenceClient
4
 
5
# Initialize HF Inference client
@st.cache_resource
def get_client():
    """Build and cache an InferenceClient authenticated via Streamlit secrets.

    Reads the ``HF_TOKEN`` entry from ``st.secrets`` (raises if missing) and
    returns a cached client instance shared across reruns.
    """
    token = st.secrets["HF_TOKEN"]
    return InferenceClient(token=token)
 
 
 
 
 
 
 
 
9
 
10
  client = get_client()
11
 
12
  # Define the LLM call function
13
- def call_llama(prompt, model="meta-llama/Meta-Llama-3-8B-Instruct", max_tokens=500):
14
  try:
15
  response = client.text_generation(
16
  prompt=prompt,
@@ -33,7 +42,7 @@ with st.sidebar:
33
  st.markdown("""
34
  # 🦙🦙 Duel Agent Simulation
35
  ## Overview
36
- This app simulates an interview with two Llama3 AI agents:
37
  1. **Interviewer**: Asks questions about your topic
38
  2. **Interviewee**: Attempts to answer (poorly at first)
39
  3. **Judge**: Provides feedback after each answer
@@ -110,15 +119,12 @@ if submit_button:
110
  st.markdown(f"**Interviewee:** {answer}")
111
  st.session_state.messages.append({"role": "user", "content": f"Interviewee: {answer}"})
112
 
113
- # Judge evaluates - FIXED STRING FORMATTING HERE
114
  feedback_prompt = f"""Evaluate this interview exchange:
115
  Question: {question}
116
  Answer: {answer}
117
  Provide specific feedback and a score from 1-10 (10=best).
118
- Format your response as:
119
- "Feedback: [your feedback here]
120
- Score: [number between 1-10]"
121
- """
122
 
123
  judge_response = call_llama(feedback_prompt).strip()
124
 
 
1
  import streamlit as st
2
  import pandas as pd
3
+ import os
4
  from huggingface_hub import InferenceClient
5
 
6
# Initialize HF Inference client
@st.cache_resource
def get_client():
    """Create and cache a Hugging Face InferenceClient.

    Resolves the API token from the ``HF_TOKEN`` environment variable first
    (this is how Hugging Face Spaces injects secrets), falling back to
    Streamlit's ``st.secrets``. If neither source provides a token, an error
    is shown and the app run is stopped.

    Returns:
        InferenceClient: a client authenticated with the resolved token,
        cached for the lifetime of the Streamlit session.
    """
    # Try to get token from environment variables (works in Hugging Face Spaces)
    hf_token = os.environ.get("HF_TOKEN")
    if not hf_token:
        # Fallback to Streamlit secrets. Catch only the failures st.secrets
        # can raise here — KeyError (key absent) and FileNotFoundError (no
        # secrets.toml) — instead of a bare `except:`, which would also
        # swallow SystemExit/KeyboardInterrupt and hide real bugs.
        try:
            hf_token = st.secrets["HF_TOKEN"]
        except (KeyError, FileNotFoundError):
            st.error("HF_TOKEN not found. Please set it in your Space's settings or secrets.toml")
            st.stop()
    return InferenceClient(token=hf_token)
18
 
19
  client = get_client()
20
 
21
  # Define the LLM call function
22
+ def call_llama(prompt, model="mistralai/Mistral-7B-Instruct-v0.2", max_tokens=500):
23
  try:
24
  response = client.text_generation(
25
  prompt=prompt,
 
42
  st.markdown("""
43
  # 🦙🦙 Duel Agent Simulation
44
  ## Overview
45
+ This app simulates an interview with two AI agents:
46
  1. **Interviewer**: Asks questions about your topic
47
  2. **Interviewee**: Attempts to answer (poorly at first)
48
  3. **Judge**: Provides feedback after each answer
 
119
  st.markdown(f"**Interviewee:** {answer}")
120
  st.session_state.messages.append({"role": "user", "content": f"Interviewee: {answer}"})
121
 
122
+ # Judge evaluates
123
  feedback_prompt = f"""Evaluate this interview exchange:
124
  Question: {question}
125
  Answer: {answer}
126
  Provide specific feedback and a score from 1-10 (10=best).
127
+ Format: Feedback: [your feedback] Score: [1-10]"""
 
 
 
128
 
129
  judge_response = call_llama(feedback_prompt).strip()
130