import json
import gradio as gr
from transformers import pipeline
# Instruction-tuned model shared by all answer generation below.
generator = pipeline(
    "text-generation",
    model="HuggingFaceH4/zephyr-7b-beta",
    device_map="auto",
)
def generate_answer(question: str) -> str:
    """Generate a short answer to *question* with the shared Zephyr pipeline.

    The prompt instructs the model to answer concisely; decoding is greedy
    (do_sample=False) so results are deterministic for a given question.
    """
    prompt = f"Answer the following question accurately and concisely:\nQuestion: {question}\nAnswer:"
    # BUG FIX: the previous code hard-coded pad_token_id=50256 (GPT-2's EOS id),
    # which is wrong for Zephyr's Mistral vocabulary. Use the model's own
    # tokenizer EOS id instead.
    result = generator(
        prompt,
        max_new_tokens=50,
        do_sample=False,
        pad_token_id=generator.tokenizer.eos_token_id,
    )
    # The pipeline echoes the prompt; keep only the text after the final
    # "Answer:" marker.
    answer = result[0]["generated_text"].split("Answer:")[-1].strip()
    return answer
def run_agent() -> dict:
    """Answer every task in questions.json and write the submission file.

    Reads ``questions.json`` (a list of objects with ``task_id`` and
    ``question`` keys), generates an answer for each, writes the combined
    submission to ``answers.json``, and returns the submission dict.
    """
    # Load the tasks from the expected input file.
    with open("questions.json", "r", encoding="utf-8") as f:
        tasks = json.load(f)
    results = []
    for task in tasks:
        question = task["question"]
        task_id = task["task_id"]
        try:
            answer = generate_answer(question)
        except Exception as e:
            # Best-effort fallback, but surface the actual error so failures
            # are diagnosable (the old code discarded `e` entirely).
            print(f"Error answering task {task_id}: {e}")
            answer = "Unable to answer this question."
        print(f"\nTask ID: {task_id}\nQuestion: {question}\nAnswer: {answer}\n")
        results.append({
            "task_id": task_id,
            "submitted_answer": answer
        })
    # Final output structure expected by the grader.
    submission = {
        "username": "alexputhiyadom",
        "agent_code": "https://huggingface.co/spaces/alexputhiyadom/Final_Assignment_Template/tree/main",
        "answers": results
    }
    # Persist the answers alongside returning them.
    with open("answers.json", "w", encoding="utf-8") as f:
        json.dump(submission, f, indent=2)
    return submission
# Gradio UI: a single "Run" button that executes the whole agent and shows
# the resulting submission as JSON.
_ui_settings = {
    "fn": run_agent,
    "inputs": [],
    "outputs": "json",
    "title": "GAIA Smart Agent (Zephyr)",
    "description": "Click 'Run' to evaluate 20 questions using an optimized agent.",
}
iface = gr.Interface(**_ui_settings)

if __name__ == "__main__":
    iface.launch(share=False)