from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os

# Read the Hugging Face access token from the environment (needed for gated models).
hf_token = os.environ.get("HF_TOKEN")

MODEL_REPO = "Rahul-8799/project_manager_gemma3"

tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO, trust_remote_code=True, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_REPO,
    device_map="auto",          # spread layers automatically across available devices
    torch_dtype=torch.float16,  # half precision to reduce memory use
    token=hf_token,
)
model.eval()

def run(prompt):
    # Tokenize the prompt and move the tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=512)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
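A minimal usage sketch follows. The prompt text is purely illustrative, and HF_TOKEN must be set in the environment before running if the model repository is gated.

if __name__ == "__main__":
    # Hypothetical example prompt; substitute your own task description.
    print(run("Draft a project plan for a two-week sprint."))

Note that generate() with no sampling arguments uses the model's default generation config; pass options such as do_sample or temperature explicitly if you need deterministic or more varied output.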