# GodeusAI-v1 / inference.py
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
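# Assumed dependencies (not pinned in the original): torch, transformers, peft.
# Loading Mistral-7B in float16 needs roughly 14 GB of GPU memory.
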
# Load base model and tokenizer
base_model = "mistralai/Mistral-7B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(base_model)
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    torch_dtype=torch.float16,
    device_map="auto",
)
# Load the LoRA adapter weights from this repository's root directory
model = PeftModel.from_pretrained(model, "./")
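
# Optional: merge the adapter into the base weights for faster inference.
# merge_and_unload() is a standard PeftModel method; it is left commented
# out here so the adapter remains separate from the base model.
# model = model.merge_and_unload()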
# Ensure evaluation mode
model.eval()
def chat():
    print("🕉️ Welcome to GodeusAI — your spiritual assistant. Type 'exit' to quit.\n")
    while True:
        user_input = input("You: ")
        if user_input.lower() == "exit":
            print("Goodbye.")
            break

        # Wrap the user's message in the tags the adapter expects.
        prompt = f"<|user|>: {user_input}\n<|assistant|>:"
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

        # Sample a completion; gradients are unnecessary at inference time.
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=200,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
            )

        # Decode the full sequence and keep only the text after the assistant tag.
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        print("GodeusAI:", response.split("<|assistant|>:")[-1].strip())
if __name__ == "__main__":
    chat()
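
# Run with: python inference.py (from the directory containing the adapter files).
# Note: each turn is generated independently, so no conversation history is
# carried between turns; the <|user|>/<|assistant|> tags are a plain-text
# prompt convention (presumably matching the fine-tuning data), not registered
# special tokens, which is why the split() above still sees them even with
# skip_special_tokens=True.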