ewernn committed on
Commit
274cf02
·
verified ·
1 Parent(s): c518ef9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -25
app.py CHANGED
@@ -2,9 +2,7 @@ import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  from peft import PeftModel
4
 
5
- # Load base model
6
  base_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
7
- # Load LoRA adapters
8
  model = PeftModel.from_pretrained(base_model, "ewernn/perfect-refusal-model")
9
  tokenizer = AutoTokenizer.from_pretrained("ewernn/perfect-refusal-model")
10
 
@@ -15,26 +13,4 @@ def chat(message, history):
15
  response = tokenizer.decode(outputs[0], skip_special_tokens=True).split("model\n")[-1]
16
  return response.replace("<end_of_turn>", "").strip()
17
 
18
- # Minimal theme
19
- theme = gr.themes.Soft(
20
- primary_hue="slate",
21
- font=["system-ui", "sans-serif"]
22
- ).set(
23
- body_background_fill="*neutral_50",
24
- body_background_fill_dark="*neutral_900",
25
- )
26
-
27
- # Ultra-minimal interface
28
- demo = gr.ChatInterface(
29
- fn=chat,
30
- type="messages",
31
- theme=theme,
32
- fill_height=False,
33
- show_share_button=False,
34
- submit_btn="→",
35
- retry_btn=None,
36
- undo_btn=None,
37
- clear_btn=None,
38
- )
39
-
40
- demo.launch()
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  from peft import PeftModel
4
 
 
5
  base_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
 
6
  model = PeftModel.from_pretrained(base_model, "ewernn/perfect-refusal-model")
7
  tokenizer = AutoTokenizer.from_pretrained("ewernn/perfect-refusal-model")
8
 
 
13
  response = tokenizer.decode(outputs[0], skip_special_tokens=True).split("model\n")[-1]
14
  return response.replace("<end_of_turn>", "").strip()
15
 
16
+ gr.ChatInterface(chat).launch()