```python
from fastapi import FastAPI
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

app = FastAPI()

# Load Llama 2 model (gated repo: requires an approved Hugging Face access token)
MODEL_NAME = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, torch_dtype=torch.float16, device_map="auto"
)

@app.post("/generate")
async def generate_text(data: dict):
    prompt = data.get("prompt", "")
    if not prompt:
        return {"error": "No prompt provided"}
    # Send inputs to the same device device_map="auto" placed the model on
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.inference_mode():
        # max_new_tokens bounds only the generated continuation, not the prompt
        output = model.generate(**inputs, max_new_tokens=200)
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return {"generated_text": response}
```