import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Hugging Face Hub user and model repository for the fine-tuned checkpoint.
userxd = "OrangyDev"
model_id = f"{userxd}/godot4-expert-ai"

# Load the tokenizer and model. Half precision (float16) is used on GPU to
# reduce memory; full precision (float32) is the CPU fallback.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
)
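# Some causal-LM tokenizers (e.g. LLaMA-style ones) ship without a pad token,
# which makes `generate` warn on padded inputs. Whether that applies to this
# checkpoint is an assumption; reusing EOS as the pad token is a common,
# harmless default.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token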


def chat_godot(message, history):
    # Build the prompt in the single-turn "### User / ### Assistant" format
    # this model expects. `history` is ignored here, so each message is
    # answered without conversational context (see the sketch after this
    # function).
    prompt = f"### User: {message}\n### Assistant:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Generate up to 150 new tokens with light sampling; the repetition
    # penalty discourages the model from looping on the same phrase.
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=150,
            temperature=0.7,
            do_sample=True,
            repetition_penalty=1.2,
            eos_token_id=tokenizer.eos_token_id,
        )

    # `generate` returns the prompt plus the completion, so keep only the text
    # after the final "### Assistant:" marker. If the model starts a new
    # "### User:" turn on its own, cut the response off there as well.
    full_text = tokenizer.decode(output[0], skip_special_tokens=True)
    response = full_text.split("### Assistant:")[-1].split("### User:")[0].strip()
    return response
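

# A minimal sketch of a history-aware prompt builder, assuming the default
# tuple-style history that gr.ChatInterface passes (a list of
# [user_message, assistant_message] pairs) and assuming the model accepts
# concatenated "### User / ### Assistant" turns; neither is confirmed by
# this script.
def build_prompt_with_history(message, history):
    turns = "".join(
        f"### User: {user}\n### Assistant: {assistant}\n"
        for user, assistant in history
    )
    return f"{turns}### User: {message}\n### Assistant:"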


demo = gr.ChatInterface(
    fn=chat_godot,
    title="Godot 4 Expert AI",
    description=f"An AI trained by {userxd} to answer questions about Godot 4 and GDScript.",
    examples=[
        "Who is your creator?",
        "Who is Rafa Laguna?",
        "How do I move a character in Godot 4?",
    ],
    theme="soft",
)
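
# Optional: on shared hardware (e.g. a Hugging Face Space) it can help to
# serialize requests through Gradio's built-in queue before launching:
# demo.queue()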

if __name__ == "__main__":
    demo.launch()