hydffgg committed on
Commit
efb0bd0
·
verified ·
1 Parent(s): 20122ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -15
app.py CHANGED
@@ -16,20 +16,25 @@ model = AutoModelForCausalLM.from_pretrained(
16
  )
17
 
18
  # ======================
19
- # Clean output (FIX ký tự rác)
20
  # ======================
21
def clean_output(text: str) -> str:
    """Cut *text* at the first sentence terminator (., !, ? or newline).

    Returns the stripped prefix ending at the earliest terminator, or the
    whole stripped text when no terminator is present.
    """
    terminator = re.search(r"[.!?\n]", text)
    if terminator is not None:
        # Keep everything through the terminator, then drop edge whitespace.
        return text[: terminator.end()].strip()
    return text.strip()
 
27
 
28
  # ======================
29
  # Chat function
30
  # ======================
31
  def chat(message, history):
32
- prompt = "<bos><start_of_turn>user\n" + message + "\n<end_of_turn>\n<start_of_turn>model\n"
 
 
 
 
33
 
34
  inputs = tokenizer(prompt, return_tensors="pt")
35
 
@@ -45,24 +50,23 @@ def chat(message, history):
45
  )
46
 
47
  decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
48
-
49
  reply = decoded.split("model")[-1].strip()
50
  reply = clean_output(reply)
51
 
52
  return reply
53
 
54
  # ======================
55
- # UI
56
  # ======================
57
# NOTE(review): the original passed submit_btn="Send", retry_btn, undo_btn
# and clear_btn — those keyword arguments were removed from
# gr.ChatInterface in modern Gradio releases and raise TypeError at
# startup. Keep only the portable arguments.
demo = gr.ChatInterface(
    fn=chat,
    title="🤖 Gemma3 270M Cloud Chat",
    description="Gemma3 270M chạy cloud miễn phí trên Hugging Face Spaces",
    examples=["hi", "giải thích AI là gì", "hello world trong python"],
)
67
 
68
  if __name__ == "__main__":
 
16
  )
17
 
18
  # ======================
19
+ # Clean output (fix ký tự rác)
20
  # ======================
21
def clean_output(text: str) -> str:
    """Trim a generated reply to its first complete sentence.

    Cuts at the earliest of ".", "!", "?" or a newline; when no terminator
    is present, the stripped text is returned unchanged.
    """
    text = text.strip()
    # Bug fix: the original looped over [".", "!", "?", "\n"] and split on
    # the first *listed* terminator found anywhere in the string, so
    # "Hi! Bye." returned the whole string (the later "." outranked the
    # earlier "!"). Cut at the terminator occurring earliest in the text.
    positions = [text.find(end) for end in (".", "!", "?", "\n")]
    hits = [pos for pos in positions if pos != -1]
    if hits:
        return text[: min(hits) + 1]
    return text
28
 
29
  # ======================
30
  # Chat function
31
  # ======================
32
  def chat(message, history):
33
+ prompt = (
34
+ "<bos><start_of_turn>user\n"
35
+ + message
36
+ + "\n<end_of_turn>\n<start_of_turn>model\n"
37
+ )
38
 
39
  inputs = tokenizer(prompt, return_tensors="pt")
40
 
 
50
  )
51
 
52
  decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
53
  reply = decoded.split("model")[-1].strip()
54
  reply = clean_output(reply)
55
 
56
  return reply
57
 
58
  # ======================
59
+ # UI (SAFE MODE)
60
  # ======================
61
# Minimal "safe mode" chat UI: only arguments accepted across current
# Gradio releases, with the chat() function as the handler.
demo = gr.ChatInterface(
    fn=chat,
    title="🤖 Gemma3 270M Cloud Chat",
    description="Gemma3 270M chạy cloud miễn phí trên Hugging Face Spaces",
    examples=["hi", "AI là gì?", "viết hello world bằng python"],
)
71
 
72
  if __name__ == "__main__":