# AmSide1.0 / app.py
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
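
# Base model and PEFT adapter repo IDs on the Hugging Face Hub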
BASE_MODEL = "Qwen/Qwen3-4B-Instruct-2507"
ADAPTER_MODEL = "Hodely/AmInside-Qwen3-4B"
# Load the tokenizer for the base model
tokenizer = AutoTokenizer.from_pretrained(
    BASE_MODEL,
    trust_remote_code=True
)
# Load the base model in fp16
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    dtype=torch.float16,
    low_cpu_mem_usage=False,
    trust_remote_code=True
)
# Attach the fine-tuned AmInside adapter on top of the base model (inference only)
model = PeftModel.from_pretrained(
    base_model,
    ADAPTER_MODEL,
    is_trainable=False
)
model.eval()
def generate_answer(message, history):
    # history is supplied by gr.ChatInterface; only the latest message is used in the prompt
    prompt = f"""<|im_start|>system
Eres AmSide, una inteligencia artificial basada en el modelo AmInSide1.0, creada por HodelyGil. Te adaptas a la información del prompt, una web, un texto o cualquier contexto que el usuario te dé. Sirves para ayudar a estudiar, programar, crear, explicar y resolver tareas generales.<|im_end|>
<|im_start|>user
{message}<|im_end|>
<|im_start|>assistant
"""
    # Tokenize and move inputs to the same device as the model
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=180,
            temperature=0.7,
            do_sample=True,
            repetition_penalty=1.08,
            pad_token_id=tokenizer.eos_token_id
        )
    # Decode the full sequence and keep only the assistant's reply
    text = tokenizer.decode(outputs[0], skip_special_tokens=False)
    answer = text.split("<|im_start|>assistant")[-1]
    answer = answer.replace("<|im_end|>", "").strip()
    return answer
demo = gr.ChatInterface(
    fn=generate_answer,
    title="AmSide",
    description="AmSide · Modelo AmInSide1.0 creado por HodelyGil"
)

demo.launch()