# Hugging Face Spaces page header captured with the file (not code):
# Spaces:
# Runtime error
# Runtime error
import asyncio
from typing import Any, AsyncGenerator, Dict, Generator

import torch
from langchain_core.messages import AIMessage
from transformers import AutoModelForCausalLM, AutoTokenizer
# Fine-tuned Mistral checkpoint used by the UI-designer agent.
MODEL_REPO = "Rahul-8799/ui_designer_mistral"

# Load tokenizer and weights once at import time so every request reuses them.
# trust_remote_code allows the repo to ship custom tokenizer/model code —
# only safe because this specific checkpoint is trusted.
tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_REPO,
    torch_dtype=torch.float16,  # half precision to reduce memory footprint
    device_map="auto",          # let accelerate place layers on available devices
)
async def stream_inference(prompt: str, max_tokens: int = 100) -> AsyncGenerator[str, None]:
    """Stream the model's output one token at a time.

    Args:
        prompt: Text prompt fed to the model.
        max_tokens: Upper bound on the number of generated tokens
            (default 100, matching the previous hard-coded limit).

    Yields:
        The decoded text of each newly generated token.

    Note:
        Each step re-runs ``generate`` on the full sequence with
        ``max_new_tokens=1`` (no KV-cache reuse across calls), so cost grows
        with output length. Decoding tokens individually can also split
        multi-byte characters for some tokenizers; the caller re-joins the
        stream, so the final text is unaffected.
    """
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
    for _ in range(max_tokens):
        output_ids = model.generate(
            input_ids,
            max_new_tokens=1,
            pad_token_id=tokenizer.eos_token_id,
        )
        # .item() converts the 0-dim tensor to a plain int so the EOS
        # comparison below is int == int, not tensor == int.
        new_token = output_ids[0, -1].item()
        if new_token == tokenizer.eos_token_id:
            break
        yield tokenizer.decode([new_token])
        input_ids = output_ids  # feed the grown sequence back in next step
        await asyncio.sleep(0.05)  # let the event loop service other tasks
async def run(state: Dict[str, Any]) -> Dict[str, Any]:
    """UI Designer agent node: create a responsive, interactive UI design.

    Args:
        state: Graph-style state dict. Reads ``state["messages"]`` (the last
            message's ``content`` becomes the design brief) and
            ``state["chat_log"]`` (a missing key is tolerated and treated as
            an empty log).

    Returns:
        Partial state update: the model reply wrapped in an ``AIMessage``,
        the chat log with the new entry appended, and the raw design text
        under ``"ui_design_output"``.
    """
    messages = state["messages"]
    prompt = messages[-1].content
    # Wrap the user's brief in a structured spec so the model answers in a
    # predictable, sectioned format.
    enhanced_prompt = f"""
Create a modern, responsive UI design following these requirements:
1. Mobile-first approach with responsive breakpoints
2. Modern CSS features (Flexbox, Grid, CSS Variables)
3. Interactive elements with JavaScript
4. Smooth animations and transitions
5. Accessibility features
6. Cross-browser compatibility
Original requirements: {prompt}
Provide the design in this format:
## Responsive Layout
[Describe the responsive layout structure]
## CSS Framework
[Specify CSS framework and custom styles]
## JavaScript Features
[List interactive features and animations]
## Component Structure
[Describe component hierarchy and relationships]
"""
    # Accumulate streamed tokens in a list and join once at the end instead
    # of repeated string concatenation.
    chunks = []
    async for token in stream_inference(enhanced_prompt):
        chunks.append(token)
    output = "".join(chunks)
    return {
        "messages": [AIMessage(content=output)],
        # .get() tolerates a state that was initialized without a chat_log.
        "chat_log": state.get("chat_log", []) + [{"role": "UI Designer", "content": output}],
        "ui_design_output": output,
    }