|
|
import json |
|
|
import gradio as gr |
|
|
from transformers import pipeline |
|
|
|
|
|
|
|
|
# Load the Zephyr-7B-beta text-generation pipeline once at import time.
# device_map="auto" lets transformers/accelerate place the weights on the
# available GPU(s) or fall back to CPU automatically.
generator = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", device_map="auto")
|
|
|
|
|
def generate_answer(question):
    """Generate a concise answer to *question* using the Zephyr pipeline.

    Args:
        question: The question text to answer.

    Returns:
        str: The model's answer with the prompt scaffolding stripped off.
    """
    prompt = f"Answer the following question accurately and concisely:\nQuestion: {question}\nAnswer:"

    # Greedy decoding (do_sample=False) for reproducible answers. Pad with the
    # model's own EOS token: the previous hard-coded 50256 is GPT-2's EOS id
    # and is not a valid special token for Zephyr's (Mistral-based) tokenizer.
    result = generator(
        prompt,
        max_new_tokens=50,
        do_sample=False,
        pad_token_id=generator.tokenizer.eos_token_id,
    )

    # The pipeline echoes the prompt in "generated_text"; keep only the text
    # after the final "Answer:" marker.
    answer = result[0]["generated_text"].split("Answer:")[-1].strip()

    return answer
|
|
|
|
|
def run_agent():
    """Answer every task in questions.json and write the submission payload.

    Reads the task list from ``questions.json``, answers each question with
    :func:`generate_answer`, prints progress per task, writes the assembled
    submission to ``answers.json``, and returns it.

    Returns:
        dict: Submission payload with "username", "agent_code", and "answers".
    """
    # Explicit encoding so the task file parses identically on every platform.
    with open("questions.json", "r", encoding="utf-8") as f:
        tasks = json.load(f)

    results = []

    for task in tasks:
        question = task["question"]
        task_id = task["task_id"]

        try:
            answer = generate_answer(question)
        except Exception as e:
            # Best-effort: keep processing remaining tasks, but surface the
            # failure instead of silently discarding it.
            print(f"Error while answering task {task_id}: {e}")
            answer = "Unable to answer this question."

        print(f"\nTask ID: {task_id}\nQuestion: {question}\nAnswer: {answer}\n")

        results.append({
            "task_id": task_id,
            "submitted_answer": answer
        })

    submission = {
        "username": "alexputhiyadom",
        "agent_code": "https://huggingface.co/spaces/alexputhiyadom/Final_Assignment_Template/tree/main",
        "answers": results
    }

    # Persist the submission so it can be uploaded/inspected after the run.
    with open("answers.json", "w", encoding="utf-8") as f:
        json.dump(submission, f, indent=2)

    return submission
|
|
|
|
|
|
|
|
# Gradio UI: no input widgets — clicking "Run" calls run_agent() and renders
# the returned submission dict as JSON.
iface = gr.Interface(


    fn=run_agent,


    inputs=[],


    outputs="json",


    title="GAIA Smart Agent (Zephyr)",


    description="Click 'Run' to evaluate 20 questions using an optimized agent."


)
|
|
|
|
|
if __name__ == "__main__":


    # Launch the Gradio app locally; share=False skips the public share link.
    iface.launch(share=False)
|
|
|