Spaces:
Sleeping
Sleeping
Sandhya
committed on
Commit
·
94b323b
1
Parent(s):
c886e14
First Commit
Browse files- app.py +6 -12
- requirements.txt +1 -0
app.py
CHANGED
|
@@ -56,10 +56,8 @@ async def startup_event():
|
|
| 56 |
|
| 57 |
|
| 58 |
|
| 59 |
-
|
| 60 |
global agent_instance
|
| 61 |
-
if agent_instance is None:
|
| 62 |
-
agent_instance = await get_agent()
|
| 63 |
prompt=f"""You're an assistant helping with hugging face model cards.
|
| 64 |
First, run the tool `read_model_card` on repo_id `{model_id}` to get the model card.
|
| 65 |
Then answer this user question based on the model card:
|
|
@@ -67,15 +65,10 @@ async def chat_function(user_message, history, model_id):
|
|
| 67 |
history = history + [(user_message, None)]
|
| 68 |
try:
|
| 69 |
response = ""
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
except TypeError:
|
| 75 |
-
for output in agent_instance.run(prompt):
|
| 76 |
-
if hasattr(output, "content") and output.content:
|
| 77 |
-
response = output.content
|
| 78 |
-
|
| 79 |
final_response = response or "⚠️ Sorry, I couldn't generate a response."
|
| 80 |
history[-1] = (user_message, final_response)
|
| 81 |
except Exception as e:
|
|
@@ -85,6 +78,7 @@ async def chat_function(user_message, history, model_id):
|
|
| 85 |
|
| 86 |
|
| 87 |
|
|
|
|
| 88 |
def create_gradio_app():
|
| 89 |
with gr.Blocks(title="Model Card Chatbot") as demo:
|
| 90 |
gr.Markdown("## 🤖 Model Card Chatbot\nAsk questions about Hugging Face model card")
|
|
|
|
| 56 |
|
| 57 |
|
| 58 |
|
| 59 |
+
def chat_function(user_message, history, model_id):
|
| 60 |
global agent_instance
|
|
|
|
|
|
|
| 61 |
prompt=f"""You're an assistant helping with hugging face model cards.
|
| 62 |
First, run the tool `read_model_card` on repo_id `{model_id}` to get the model card.
|
| 63 |
Then answer this user question based on the model card:
|
|
|
|
| 65 |
history = history + [(user_message, None)]
|
| 66 |
try:
|
| 67 |
response = ""
|
| 68 |
+
for output in agent_instance.run(prompt):
|
| 69 |
+
if hasattr(output, "content") and output.content:
|
| 70 |
+
response = output.content
|
| 71 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
final_response = response or "⚠️ Sorry, I couldn't generate a response."
|
| 73 |
history[-1] = (user_message, final_response)
|
| 74 |
except Exception as e:
|
|
|
|
| 78 |
|
| 79 |
|
| 80 |
|
| 81 |
+
|
| 82 |
def create_gradio_app():
|
| 83 |
with gr.Blocks(title="Model Card Chatbot") as demo:
|
| 84 |
gr.Markdown("## 🤖 Model Card Chatbot\nAsk questions about Hugging Face model card")
|
requirements.txt
CHANGED
|
@@ -4,4 +4,5 @@ gradio==5.38.0
|
|
| 4 |
gradio_client==1.11.0
|
| 5 |
huggingface-hub==0.33.4
|
| 6 |
uvicorn==0.35.0
|
|
|
|
| 7 |
|
|
|
|
| 4 |
gradio_client==1.11.0
|
| 5 |
huggingface-hub==0.33.4
|
| 6 |
uvicorn==0.35.0
|
| 7 |
+
python-dotenv
|
| 8 |
|