File size: 3,156 Bytes
7716dc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
from transformers import pipeline
import gradio as gr

# Step 1: Load model pipeline
# The Zephyr-7B-alpha chat model is loaded once at import time and shared by
# every agent call below.  NOTE(review): device_map="auto" requires the
# `accelerate` package at runtime — confirm it is a declared dependency.
generator = pipeline(
    "text-generation",
    model="HuggingFaceH4/zephyr-7b-alpha",
    max_new_tokens=512,   # cap on tokens generated per call
    temperature=0.7,      # mild randomness; only takes effect with do_sample=True
    do_sample=True,
    device_map="auto"
)

# Step 2: Agent functions
def chat_with_agent(role_description, prompt):
    """Run one agent turn against the shared text-generation pipeline.

    The agent's role is injected as a bracketed system line ahead of the
    user prompt.  Returns the model's continuation, stripped of any echoed
    prompt text and surrounding whitespace.
    """
    framed_prompt = f"[System: {role_description}]\n" + prompt
    outputs = generator(framed_prompt, return_full_text=False)
    generated = outputs[0]['generated_text']
    # Defensive: drop the prompt in case the model echoes it despite
    # return_full_text=False.
    return generated.replace(framed_prompt, "").strip()

def get_topic():
    """Ask the task-manager agent to propose a paper topic."""
    role = "You are a task manager deciding AI research paper topics."
    request = "Suggest a trending topic in AI for academic writing."
    return chat_with_agent(role, request)

def get_research(topic):
    """Ask the research-assistant agent for key points on *topic*."""
    role = "You are a research assistant gathering key points."
    request = f"Give me 5 major points about: {topic}"
    return chat_with_agent(role, request)

def generate_draft(research_points):
    """Ask the technical-writer agent to draft from *research_points*."""
    role = "You are a technical writer."
    request = f"Write a short 300-word academic draft based on:\n{research_points}"
    return chat_with_agent(role, request)

def review_draft(draft):
    """Ask the senior-editor agent to review and improve *draft*."""
    role = "You are a senior editor. Check for grammar, clarity, and flow."
    request = f"Please review and improve the following draft:\n\n{draft}"
    return chat_with_agent(role, request)

def format_final(reviewed):
    """Ask the publisher agent to apply IEEE-style formatting to *reviewed*."""
    role = "You are a publisher formatting for IEEE conference style."
    request = f"Format this content:\n{reviewed}"
    return chat_with_agent(role, request)

# Step 3: Gradio function for full pipeline
def multi_agent_ui(topic_choice=None):
    """Run the complete agent chain and return one value per output widget.

    Args:
        topic_choice: Optional topic from the UI textbox.  Falsy or
            whitespace-only input triggers auto-generation via get_topic().

    Returns:
        Tuple of (topic, research points, draft, reviewed draft, final
        formatted output) — positional, in the same order as the five
        output components wired to the button in the Gradio UI.
    """
    # BUG FIX: the original returned a dict keyed by plain strings, but a
    # Gradio click handler given a *list* of output components expects either
    # a tuple/list of values in order, or a dict keyed by the component
    # objects themselves — string keys raise an error when the event fires.
    # Robustness: treat whitespace-only input the same as a blank textbox.
    cleaned = topic_choice.strip() if topic_choice else ""
    topic = cleaned if cleaned else get_topic()
    research = get_research(topic)
    draft = generate_draft(research)
    reviewed = review_draft(draft)
    final_output = format_final(reviewed)
    return topic, research, draft, reviewed, final_output

# Step 4: Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 AI Research Paper Assistant (Multi-Agent System)")
    
    with gr.Row():
        topic_input = gr.Textbox(label="Enter Topic (Optional)", placeholder="Leave blank to auto-generate")
        generate_btn = gr.Button("Generate Research Paper")
    
    with gr.Accordion("πŸ“Œ Topic", open=False):
        topic_output = gr.Textbox(label="Topic", lines=2)
    
    with gr.Accordion("πŸ“š Research Points", open=False):
        research_output = gr.Textbox(label="Research Points", lines=6)
    
    with gr.Accordion("πŸ“ Draft", open=False):
        draft_output = gr.Textbox(label="Draft", lines=10)
    
    with gr.Accordion("βœ… Reviewed", open=False):
        reviewed_output = gr.Textbox(label="Reviewed Draft", lines=10)
    
    with gr.Accordion("πŸ“„ Final Paper", open=True):
        final_output = gr.Textbox(label="Formatted Final Output", lines=12)

    generate_btn.click(
        fn=multi_agent_ui,
        inputs=[topic_input],
        outputs=[
            topic_output, research_output,
            draft_output, reviewed_output,
            final_output
        ]
    )

demo.launch()