# Multi_agent_test/agents/software_engineer_agent.py
# Uploaded by Rahul-8799 (commit 62d0613, verified)
import asyncio
from typing import Any, AsyncGenerator, Dict, Generator

import torch
from langchain_core.messages import AIMessage
from transformers import AutoTokenizer, AutoModelForCausalLM
# Hugging Face Hub repository hosting the fine-tuned code-generation model.
MODEL_REPO = "Rahul-8799/software_engineer_mellum"

# Loaded once at import time and shared by every request handled by this
# module. trust_remote_code=True executes custom model code shipped with the
# repo — acceptable only because the repo is first-party/trusted.
tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_REPO,
    torch_dtype=torch.float16,  # half precision to roughly halve GPU memory
    device_map="auto"  # let accelerate place layers on available devices
)
async def stream_inference(prompt: str, max_new_tokens: int = 100) -> AsyncGenerator[str, None]:
    """Stream the model's output one token at a time.

    Args:
        prompt: Text fed verbatim to the model.
        max_new_tokens: Upper bound on generated tokens. Defaults to 100,
            matching the previously hard-coded loop limit.

    Yields:
        The decoded text of each newly generated token, stopping early at EOS.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    input_ids = encoded.input_ids.to(model.device)
    # Pass an explicit attention mask: pad_token_id == eos_token_id below,
    # so without a mask transformers cannot distinguish padding from EOS.
    attention_mask = encoded.attention_mask.to(model.device)

    for _ in range(max_new_tokens):
        # Inference only — disable grad tracking to avoid wasted memory.
        with torch.no_grad():
            output_ids = model.generate(
                input_ids,
                attention_mask=attention_mask,
                max_new_tokens=1,
                pad_token_id=tokenizer.eos_token_id,
            )
        new_token = output_ids[0, -1].item()
        if new_token == tokenizer.eos_token_id:
            break
        yield tokenizer.decode([new_token])
        # Feed the grown sequence back in; extend the mask to match its length.
        input_ids = output_ids
        attention_mask = torch.ones_like(input_ids)
        # Yield control to the event loop between tokens — model.generate
        # itself is a blocking call.
        await asyncio.sleep(0.05)
async def run(state: Dict[str, Any]) -> Dict[str, Any]:
    """Software Engineer agent: generate responsive, interactive UI code.

    Args:
        state: Graph state. Reads ``messages`` (the last entry's ``content``
            is taken as the user requirement) and ``chat_log`` (a list of
            role/content dicts; treated as empty when missing).

    Returns:
        Partial state update: the model output wrapped in an ``AIMessage``,
        the chat log with this agent's entry appended, and the raw text
        under ``dev_output``.
    """
    messages = state["messages"]
    prompt = messages[-1].content
    # Wrap the raw requirement with modern web-development constraints and
    # the exact output format that downstream parsing expects.
    enhanced_prompt = f"""
Generate modern, responsive, and interactive UI code following these requirements:
1. Use Tailwind CSS for responsive design
2. Implement JavaScript for interactivity
3. Add smooth animations and transitions
4. Ensure mobile-first approach
5. Include proper error handling
6. Add loading states and feedback
Original requirements: {prompt}
Generate the following files:
1. index.html - Main HTML structure
2. styles.css - Custom styles (if needed beyond Tailwind)
3. script.js - Interactive features
4. tailwind.config.js - Tailwind configuration
Format the output as:
## HTML Structure
```html
[HTML code]
```
## CSS Styles
```css
[CSS code]
```
## JavaScript
```javascript
[JavaScript code]
```
## Tailwind Config
```javascript
[Tailwind configuration]
```
"""
    # Collect streamed tokens in a list and join once — repeated string
    # concatenation in a loop is quadratic.
    chunks = []
    async for token in stream_inference(enhanced_prompt):
        chunks.append(token)
    output = "".join(chunks)
    return {
        "messages": [AIMessage(content=output)],
        # .get() keeps the agent usable when no chat_log exists in state yet.
        "chat_log": state.get("chat_log", []) + [{"role": "Software Engineer", "content": output}],
        "dev_output": output,
    }