import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

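# Base model on the Hugging Face Hub and the LoRA adapter fine-tuned on top of it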
BASE_MODEL = "Qwen/Qwen3-4B-Instruct-2507"
ADAPTER_MODEL = "Hodely/AmInside-Qwen3-4B"

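# Load the tokenizer that ships with the base model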
tokenizer = AutoTokenizer.from_pretrained(
    BASE_MODEL,
    trust_remote_code=True
)

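# Load the base model in half precision; low_cpu_mem_usage=False keeps the
# default eager weight loading (True would lower peak RAM during load)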
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    dtype=torch.float16,
    low_cpu_mem_usage=False,
    trust_remote_code=True
)

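# Stack the LoRA adapter onto the base model; is_trainable=False keeps the
# adapter weights frozen, since this Space only runs inference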
model = PeftModel.from_pretrained(
    base_model,
    ADAPTER_MODEL,
    is_trainable=False
)

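# Switch off dropout and other training-time behavior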
model.eval()
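
# Optional and not part of the original Space: on a GPU host, generation is
# much faster if the model lives on CUDA. A minimal sketch, assuming a CUDA
# device is available:
#     device = "cuda" if torch.cuda.is_available() else "cpu"
#     model.to(device)
# The tokenized inputs below would then also need a .to(device) call.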

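# Build a single-turn ChatML prompt and generate a reply; `history` is
# supplied by gr.ChatInterface but intentionally ignored here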
def generate_answer(message, history):
    prompt = f"""<|im_start|>system
You are AmSide, an artificial intelligence based on the AmInSide1.0 model, created by HodelyGil. You adapt to whatever context the user gives you: the prompt, a web page, a text, or anything else. You help with studying, programming, creating, explaining, and solving general tasks.<|im_end|>
<|im_start|>user
{message}<|im_end|>
<|im_start|>assistant
"""

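    # Tokenize the assembled ChatML prompt (tokenizer.apply_chat_template
    # could build this string from a message list as well)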
    inputs = tokenizer(prompt, return_tensors="pt")

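    # Sample up to 180 new tokens; temperature and repetition_penalty trade
    # off variety against rambling, and pad_token_id silences the warning
    # about a missing pad token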
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=180,
            temperature=0.7,
            do_sample=True,
            repetition_penalty=1.08,
            pad_token_id=tokenizer.eos_token_id
        )

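    # Keep special tokens while decoding so the assistant turn can be located,
    # then strip the end-of-turn marker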
    text = tokenizer.decode(outputs[0], skip_special_tokens=False)
    answer = text.split("<|im_start|>assistant")[-1]
    answer = answer.replace("<|im_end|>", "").strip()
    return answer

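# Minimal Gradio chat UI around the generator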
demo = gr.ChatInterface(
    fn=generate_answer,
    title="AmSide",
    description="AmSide · AmInSide1.0 model created by HodelyGil"
)

demo.launch()