faizee07 commited on
Commit
c4cbbd2
Β·
verified Β·
1 Parent(s): 1b5e96c

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +294 -0
  2. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
8
+
9
# Configuration for the different agent types.
# Three of the four roles share the same chat model; only the coder uses a
# dedicated code-generation checkpoint.
_ZEPHYR_MODEL = "HuggingFaceH4/zephyr-7b-beta"

AGENT_CONFIGS = {
    "researcher": {
        "model": _ZEPHYR_MODEL,
        "role": "Research and gather information",
        "prompt_template": "You are a research agent. Analyze and provide detailed information about: {task}",
    },
    "coder": {
        "model": "Salesforce/codegen-350M-mono",
        "role": "Generate and explain code",
        "prompt_template": "Generate Python code for: {task}",
    },
    "analyzer": {
        "model": _ZEPHYR_MODEL,
        "role": "Analyze data and provide insights",
        "prompt_template": "Analyze the following and provide insights: {task}",
    },
    "writer": {
        "model": _ZEPHYR_MODEL,
        "role": "Create content and documentation",
        "prompt_template": "Write professional content about: {task}",
    },
}
32
+
33
class AgentSystem:
    """Lazily loads one causal LM per agent and runs agents in parallel.

    Models and tokenizers are cached per agent name in ``self.models`` /
    ``self.tokenizers``.  ``run_agents_parallel`` fans work out over a
    ThreadPoolExecutor, so cache population is guarded by a lock.
    """

    def __init__(self):
        # agent name -> cached model / tokenizer
        self.models = {}
        self.tokenizers = {}
        self.executor = ThreadPoolExecutor(max_workers=4)
        # BUGFIX: generate_response runs on worker threads and may trigger
        # load_model for the same agent concurrently; without a lock two
        # threads could each load (and overwrite) a multi-GB model.
        self._load_lock = threading.Lock()
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"Using device: {self.device}")

    def load_model(self, agent_name, model_name):
        """Load and cache the model/tokenizer for *agent_name*.

        Falls back to distilgpt2 when the configured model cannot be loaded
        (e.g. no network access or insufficient memory).
        """
        with self._load_lock:
            # Re-check under the lock: another worker may have finished first.
            if agent_name in self.models:
                return
            print(f"Loading {agent_name} model: {model_name}")
            try:
                tokenizer = AutoTokenizer.from_pretrained(model_name)
                model = AutoModelForCausalLM.from_pretrained(
                    model_name,
                    torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
                    low_cpu_mem_usage=True,
                    device_map="auto" if self.device == "cuda" else None
                )
                print(f"{agent_name} model loaded successfully!")
            except Exception as e:
                print(f"Error loading {agent_name} model: {e}")
                # Fallback to smaller model
                print(f"Falling back to distilgpt2 for {agent_name}")
                tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
                model = AutoModelForCausalLM.from_pretrained("distilgpt2")
                if self.device == "cuda":
                    # BUGFIX: the fallback model was left on CPU while
                    # generate_response moves inputs to CUDA, which raised a
                    # device-mismatch error on every generation.
                    model = model.to("cuda")
            # Publish to the caches only once fully constructed.
            self.tokenizers[agent_name] = tokenizer
            self.models[agent_name] = model

    def generate_response(self, agent_name, prompt, max_length=200):
        """Generate one agent's response to *prompt*.

        Returns a dict with keys ``agent``, ``role``, ``response`` and
        ``status`` ("success" or "error"); never raises, so a failing agent
        cannot take down the parallel batch.
        """
        try:
            config = AGENT_CONFIGS[agent_name]
            model_name = config["model"]

            # Load model if not already loaded (load_model re-checks under its lock).
            if agent_name not in self.models:
                self.load_model(agent_name, model_name)

            tokenizer = self.tokenizers[agent_name]
            model = self.models[agent_name]

            # Format prompt from the agent's template.
            formatted_prompt = config["prompt_template"].format(task=prompt)

            # Tokenize; prompts longer than 512 tokens are truncated.
            inputs = tokenizer(formatted_prompt, return_tensors="pt", truncation=True, max_length=512)

            if self.device == "cuda":
                inputs = inputs.to("cuda")

            # Generate
            with torch.no_grad():
                outputs = model.generate(
                    inputs.input_ids,
                    # BUGFIX: pass the attention mask explicitly instead of
                    # letting generate() infer it from pad_token_id.
                    attention_mask=inputs.attention_mask,
                    # BUGFIX: was max_length=max_length, which counts PROMPT
                    # tokens too -- a prompt near the 512-token cap left no
                    # budget for new tokens.  max_new_tokens is the number of
                    # generated tokens, matching the UI's "tokens per agent".
                    max_new_tokens=max_length,
                    temperature=0.7,
                    top_p=0.9,
                    do_sample=True,
                    pad_token_id=tokenizer.eos_token_id
                )

            # NOTE(review): the decoded text includes the prompt itself; kept
            # as-is since the UI displays the full text.
            response = tokenizer.decode(outputs[0], skip_special_tokens=True)

            return {
                "agent": agent_name,
                "role": config["role"],
                "response": response,
                "status": "success"
            }

        except Exception as e:
            # Swallow and report: callers aggregate per-agent results.
            return {
                "agent": agent_name,
                "role": AGENT_CONFIGS[agent_name]["role"],
                "response": f"Error: {str(e)}",
                "status": "error"
            }

    def run_agents_parallel(self, task, selected_agents, max_length=200):
        """Run *selected_agents* on *task* concurrently.

        Returns ``(results, total_time)``: one result dict per agent (in
        completion order, each annotated with ``time_taken``) and the total
        wall-clock seconds for the batch.
        """
        start_time = time.time()
        futures = {}
        results = []

        # Submit tasks to thread pool
        for agent_name in selected_agents:
            future = self.executor.submit(
                self.generate_response,
                agent_name,
                task,
                max_length
            )
            futures[future] = agent_name

        # Collect results as they complete
        for future in as_completed(futures):
            agent_name = futures[future]
            try:
                result = future.result()
                result["time_taken"] = round(time.time() - start_time, 2)
                results.append(result)
            except Exception as e:
                # generate_response shouldn't raise, but guard the future anyway
                # so one failure never loses the other agents' results.
                results.append({
                    "agent": agent_name,
                    "role": AGENT_CONFIGS[agent_name]["role"],
                    "response": f"Failed: {str(e)}",
                    "status": "error",
                    "time_taken": round(time.time() - start_time, 2)
                })

        total_time = round(time.time() - start_time, 2)
        return results, total_time
145
+
146
# Initialize the agent system
# Module-level singleton shared by every Gradio request; created at import
# time.  Model weights themselves are loaded lazily on first use, so this
# construction is cheap.
print("Initializing AI Agent System...")
agent_system = AgentSystem()
149
+
150
def process_task(task, researcher, coder, analyzer, writer, max_length):
    """Dispatch *task* to the checked agents and format their results.

    Returns a pair ``(markdown_report, json_summary)``.  When the task is
    blank or no agent is selected, the first element is an error message and
    the second is an empty string.
    """
    if not task.strip():
        return "Please enter a task!", ""

    # Pair each checkbox flag with its agent name and keep the enabled ones.
    flag_map = (
        ("researcher", researcher),
        ("coder", coder),
        ("analyzer", analyzer),
        ("writer", writer),
    )
    selected_agents = [name for name, enabled in flag_map if enabled]

    if not selected_agents:
        return "Please select at least one agent!", ""

    # Fan the task out to every selected agent at once.
    results, total_time = agent_system.run_agents_parallel(task, selected_agents, max_length)

    # Assemble the markdown report: fixed header, then one section per agent.
    parts = [
        "# 🤖 AI Agent System Results\n\n",
        f"**Task:** {task}\n\n",
        f"**Agents Used:** {len(selected_agents)} agents running in parallel\n\n",
        f"**Total Time:** {total_time}s\n\n",
        "---\n\n",
    ]
    for result in results:
        badge = "✅" if result["status"] == "success" else "❌"
        parts.append(f"## {badge} {result['agent'].upper()} Agent\n")
        parts.append(f"**Role:** {result['role']}\n\n")
        parts.append(f"**Response:**\n```\n{result['response']}\n```\n\n")
        parts.append(f"*Completed in {result['time_taken']}s*\n\n")
        parts.append("---\n\n")
    report = "".join(parts)

    # Machine-readable run summary for the accordion panel.
    run_summary = {
        "task": task,
        "agents_used": len(selected_agents),
        "total_time": total_time,
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }

    return report, json.dumps(run_summary, indent=2)
196
+
197
# Create Gradio Interface
# Declarative UI: a two-column layout (controls left, results right) plus
# example tasks and static documentation, wired to process_task at the end.
with gr.Blocks(theme=gr.themes.Soft(), title="AI Agent System") as demo:
    # Page header / intro copy.
    gr.Markdown(
        """
        # 🤖 Full-Stack AI Agent System

        **Parallel AI Processing with Multiple Specialized Agents**

        This system runs multiple AI agents simultaneously to process your tasks faster!
        Each agent specializes in different areas and works in parallel.
        """
    )

    with gr.Row():
        # Left column: task input plus agent/length controls.
        with gr.Column(scale=1):
            gr.Markdown("### 📋 Task Input")
            task_input = gr.Textbox(
                label="Enter Your Task",
                placeholder="Example: Create a Python web scraper for news articles",
                lines=4
            )

            gr.Markdown("### 🎯 Select Agents")
            # One checkbox per agent; all enabled by default.  The order here
            # must match the positional inputs wired into process_btn.click.
            researcher_check = gr.Checkbox(label="🔍 Researcher Agent", value=True, info="Research and gather information")
            coder_check = gr.Checkbox(label="💻 Coder Agent", value=True, info="Generate and explain code")
            analyzer_check = gr.Checkbox(label="📊 Analyzer Agent", value=True, info="Analyze and provide insights")
            writer_check = gr.Checkbox(label="✍️ Writer Agent", value=True, info="Create documentation")

            # Per-agent generation budget passed through to the model.
            max_length = gr.Slider(
                minimum=100,
                maximum=500,
                value=200,
                step=50,
                label="Max Response Length",
                info="Tokens per agent"
            )

            process_btn = gr.Button("🚀 Run Agents in Parallel", variant="primary", size="lg")

            gr.Markdown(
                """
                ### 💡 Tips
                - Select multiple agents for comprehensive results
                - Agents run simultaneously for faster processing
                - Each agent brings unique expertise
                """
            )

        # Right column: rendered markdown report and collapsible JSON summary.
        with gr.Column(scale=2):
            gr.Markdown("### 📤 Agent Outputs")
            output_display = gr.Markdown(label="Results")

            with gr.Accordion("📊 Execution Summary", open=False):
                summary_json = gr.Code(label="JSON Summary", language="json")

    # Clickable example tasks that populate the task textbox.
    gr.Markdown("### 📚 Example Tasks")
    gr.Examples(
        examples=[
            ["Create a REST API for user authentication"],
            ["Build a machine learning model for sentiment analysis"],
            ["Design a database schema for an e-commerce platform"],
            ["Write a technical blog post about microservices"],
            ["Develop a real-time chat application"]
        ],
        inputs=task_input
    )

    # Static footer documentation.
    gr.Markdown(
        """
        ---

        ## 🏗️ System Architecture

        - **Parallel Processing**: All agents run simultaneously using ThreadPoolExecutor
        - **Free Models**: Using Hugging Face hosted models (Zephyr-7B, CodeGen)
        - **Specialized Agents**: Each agent has a specific role and expertise
        - **Fault Tolerant**: Continues even if one agent fails

        ## 🔧 Technology Stack

        - **Frontend**: Gradio
        - **Backend**: Python + Transformers
        - **Models**: Hugging Face free models
        - **Concurrency**: ThreadPoolExecutor for parallel processing
        """
    )

    # Connect button to processing function
    # Input order must match process_task's positional parameters.
    process_btn.click(
        fn=process_task,
        inputs=[task_input, researcher_check, coder_check, analyzer_check, writer_check, max_length],
        outputs=[output_display, summary_json]
    )
290
+
291
# Launch
# Only start the server when run as a script (not when imported).
if __name__ == "__main__":
    demo.queue()  # Enable queuing for better performance
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ gradio==4.16.0
2
+ transformers==4.36.2
3
+ torch==2.1.2
4
+ accelerate==0.25.0
5
+ sentencepiece==0.1.99
6
+ protobuf==3.20.3
7
+ bitsandbytes==0.41.3
8
+ scipy==1.11.4