Arthur Samuel Galego Panucci Figueiredo committed on
Commit ee3fc1f · verified · 1 Parent(s): 62705d1

Update app.py

Files changed (1)
  1. app.py +54 -61
app.py CHANGED
@@ -1,70 +1,63 @@
 
 import gradio as gr
-from huggingface_hub import InferenceClient


-def respond(
-    message,
-    history: list[dict[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    hf_token: gr.OAuthToken,
-):
-    """
-    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-    """
-    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")

-    messages = [{"role": "system", "content": system_message}]

-    messages.extend(history)
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        choices = message.choices
-        token = ""
-        if len(choices) and choices[0].delta.content:
-            token = choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-chatbot = gr.ChatInterface(
-    respond,
-    type="messages",
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
 )

-with gr.Blocks() as demo:
-    with gr.Sidebar():
-        gr.LoginButton()
-    chatbot.render()


-if __name__ == "__main__":
-    demo.launch()
 
+import torch
 import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from peft import PeftModel

+BASE_MODEL = "google/gemma-3-270m-it"
+LORA_MODEL = "loboGOAT/DogeAI-v1.0-instruct"  # your LoRA

+device = "cuda" if torch.cuda.is_available() else "cpu"

+print("🔄 Loading tokenizer...")
+tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)

+print("🔄 Loading base model...")
+model = AutoModelForCausalLM.from_pretrained(
+    BASE_MODEL,
+    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+    device_map="auto"
 )

+print("🔄 Applying LoRA...")
+model = PeftModel.from_pretrained(model, LORA_MODEL)
+model.eval()
+
+def chat(user_input):
+    prompt = (
+        "<bos>\n"
+        "<start_of_turn>user\n"
+        f"{user_input}\n"
+        "<start_of_turn>model\n"
+    )
+
+    inputs = tokenizer(prompt, return_tensors="pt").to(device)
+
+    with torch.no_grad():
+        output = model.generate(
+            **inputs,
+            max_new_tokens=200,
+            do_sample=True,
+            temperature=0.7,
+            top_p=0.9,
+            repetition_penalty=1.1,
+            eos_token_id=tokenizer.eos_token_id,
+            pad_token_id=tokenizer.pad_token_id
+        )
+
+    text = tokenizer.decode(output[0], skip_special_tokens=True)
+
+    if "<start_of_turn>model" in text:
+        return text.split("<start_of_turn>model")[-1].strip()
+
+    return text.strip()
+
+demo = gr.Interface(
+    fn=chat,
+    inputs=gr.Textbox(lines=4, placeholder="Talk to DogeAI 🐕"),
+    outputs="text",
+    title="DogeAI v1.0",
+    description="LoRA fine-tuned Gemma-3-270M-it • Created by Arthur"
+)

+demo.launch()
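
Since the adapter in the new version is only ever used for inference, the LoRA deltas could also be folded into the base weights once at startup: PEFT's `merge_and_unload()` returns a plain transformers model with the adapter merged in, which removes the per-forward adapter overhead. A minimal sketch, assuming no adapter swapping is needed later:

    print("🔄 Applying LoRA...")
    model = PeftModel.from_pretrained(model, LORA_MODEL)
    model = model.merge_and_unload()  # fold LoRA deltas into the base weights
    model.eval()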
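One caveat in `chat()`: Gemma's chat format closes the user turn with `<end_of_turn>` before the model turn begins, which the hand-built prompt omits, and because `skip_special_tokens=True` strips `<start_of_turn>` from the decoded text, the `split("<start_of_turn>model")` branch will generally not fire, so the reply comes back with the prompt still attached. A sketch of a more robust handler, assuming the `google/gemma-3-270m-it` tokenizer ships a chat template (instruction-tuned Gemma checkpoints normally do):

    def chat(user_input):
        # Let the tokenizer build the turn structure itself:
        # <start_of_turn>user ... <end_of_turn><start_of_turn>model
        input_ids = tokenizer.apply_chat_template(
            [{"role": "user", "content": user_input}],
            add_generation_prompt=True,  # append the model-turn header
            return_tensors="pt",
        ).to(device)

        with torch.no_grad():
            output = model.generate(
                input_ids,
                max_new_tokens=200,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
                repetition_penalty=1.1,
            )

        # Decode only the newly generated tokens so the user's message
        # is never echoed back in the reply.
        return tokenizer.decode(
            output[0][input_ids.shape[-1]:], skip_special_tokens=True
        ).strip()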
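The version being replaced streamed partial responses into the UI; the local model can do the same through `transformers.TextIteratorStreamer`, with `generate` running in a background thread while the handler yields growing text (Gradio treats a generator `fn` as a streaming output). A sketch of a hypothetical `chat_stream` handler, under the same assumptions as above:

    from threading import Thread

    from transformers import TextIteratorStreamer

    def chat_stream(user_input):
        input_ids = tokenizer.apply_chat_template(
            [{"role": "user", "content": user_input}],
            add_generation_prompt=True,
            return_tensors="pt",
        ).to(device)

        # skip_prompt=True keeps the echoed prompt out of the stream.
        streamer = TextIteratorStreamer(
            tokenizer, skip_prompt=True, skip_special_tokens=True
        )
        thread = Thread(
            target=model.generate,
            kwargs=dict(
                inputs=input_ids,
                streamer=streamer,
                max_new_tokens=200,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
            ),
        )
        thread.start()

        partial = ""
        for piece in streamer:
            partial += piece
            yield partial  # Gradio re-renders the output on each yield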