# backend_agent/api_generator.py
"""Generate Python backend (REST/GraphQL) API code from a task description via an LLM."""
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

MODEL_NAME = "facebook/opt-1.3b"

# NOTE(review): loading a ~1.3B-parameter model at import time is a heavy side
# effect (network download on first use). Kept eager for backward compatibility
# with any caller that does `from api_generator import tokenizer, model`.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model.eval()  # inference-only module; disable dropout etc.


def generate_backend_code_llm(task_name, max_new_tokens=350):
    """Generate Python backend code (REST/GraphQL) for a given task using the LLM.

    Args:
        task_name: Short natural-language description of the backend task.
        max_new_tokens: Upper bound on newly generated tokens. Defaults to
            350, matching the previously hard-coded value.

    Returns:
        str: The model's generated continuation (prompt echo removed),
        stripped of surrounding whitespace.
    """
    prompt = f"""
You are an expert backend developer.
Generate Python code for REST or GraphQL APIs for the following task:

Task: {task_name}

Include routes/resolvers, input validation, and placeholders for business logic.
"""
    inputs = tokenizer(prompt, return_tensors="pt")
    # Pure inference: inference_mode avoids autograd bookkeeping entirely.
    with torch.inference_mode():
        outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # Decoder-only models echo the prompt in their output. Slice off the
    # prompt tokens before decoding so only generated text is returned.
    # (The old `split("Task:")[-1]` heuristic wrongly kept the instruction
    # text that follows the "Task:" line inside the prompt itself.)
    prompt_len = inputs["input_ids"].shape[1]
    code = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return code.strip()