Atypical281795 committed on
Commit eb006b4 · verified · 1 Parent(s): 62d94e1

Upload 2 files

Files changed (2)
  1. app.py +106 -0
  2. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,106 @@
+ import os
+ import gradio as gr
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+ from peft import PeftModel
+
+ print("=== Application Starting (LoRA Mode with Quantization) ===")
+
+ try:
+     # 1. Set the base model
+     BASE_MODEL_ID = "QLU-NLP/BianCang-Qwen2.5-7B"
+
+     # 2. Auto-detect the adapter (fine-tuned weights) path
+     if os.path.exists("BianCang-Qwen2.5-7B-Instruct_finetuned_model_1"):
+         ADAPTER_PATH = "BianCang-Qwen2.5-7B-Instruct_finetuned_model_1"
+     else:
+         ADAPTER_PATH = "."
+
+     print(f"Base Model: {BASE_MODEL_ID}")
+     print(f"Adapter Path: {ADAPTER_PATH}")
+
+     # 3. Load the tokenizer
+     print("Loading Tokenizer...")
+     tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID, trust_remote_code=True)
+
+     # 4. Load the base model (4-bit quantization to save VRAM, suitable for a T4 GPU)
+     print("Loading Base Model with 4-bit quantization...")
+     quantization_config = BitsAndBytesConfig(
+         load_in_4bit=True,
+         bnb_4bit_compute_dtype=torch.float16,
+         bnb_4bit_use_double_quant=True,
+         bnb_4bit_quant_type="nf4"
+     )
+
+     try:
+         base_model = AutoModelForCausalLM.from_pretrained(
+             BASE_MODEL_ID,
+             quantization_config=quantization_config,
+             device_map="auto",
+             trust_remote_code=True,
+             offload_folder="offload"  # offload folder, just in case
+         )
+     except Exception as e:
+         print(f"GPU load failed: {e}. Fallback to CPU.")
+         base_model = AutoModelForCausalLM.from_pretrained(
+             BASE_MODEL_ID,
+             device_map="cpu",
+             trust_remote_code=True
+         )
+
+     # 5. Attach the LoRA adapter
+     print("Loading LoRA Adapter...")
+     try:
+         model = PeftModel.from_pretrained(
+             base_model,
+             ADAPTER_PATH,
+             offload_folder="offload"  # offload folder to work around a PEFT error
+         )
+         print("LoRA Adapter loaded successfully!")
+     except Exception as e:
+         print(f"Failed to load adapter: {e}")
+         print("Running with Base Model only as fallback.")
+         model = base_model
+
+     def predict(message, history):
+         # Build the prompt; the system prompt (in Chinese) casts the model as a professional
+         system_prompt = "你是一個專業的中醫藥材知識助手。你具備深厚的中醫理論基礎,特別擅長中藥材的性味、歸經、功效與主治。"  # TCM herbal-knowledge assistant
+
+         messages = [{"role": "system", "content": system_prompt}]
+         for human, assistant in history:
+             messages.append({"role": "user", "content": human})
+             messages.append({"role": "assistant", "content": assistant})
+         messages.append({"role": "user", "content": message})
+
+         text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+         model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+         generated_ids = model.generate(
+             **model_inputs,  # pass input_ids together with attention_mask
+             max_new_tokens=512,
+             temperature=0.7,
+             top_p=0.9,
+             do_sample=True
+         )
+
+         generated_ids = [
+             output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+         ]
+
+         response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+         return response
+
+     # Build the Gradio interface
+     demo = gr.ChatInterface(
+         fn=predict,
+         title="BianCang-Qwen2.5-7B TCM Chatbot",
+         description="中醫藥材知識微調模型 (4-bit LoRA)"  # "TCM herbal-knowledge fine-tuned model (4-bit LoRA)"
+     )
+
+     if __name__ == "__main__":
+         # show_api parameter removed (not supported by Gradio 4.x)
+         demo.launch(server_name="0.0.0.0", server_port=7860)
+
+ except Exception as e:
+     print(f"=== CRITICAL ERROR ===\n{e}\n======================")
+     raise e
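
Once the app is running, the chat endpoint can also be exercised programmatically. A minimal client sketch, assuming a local server on port 7860 and the default /chat route that gr.ChatInterface registers in Gradio 4.x (this client code is illustrative, not part of the commit):

from gradio_client import Client

# Connect to the locally launched app (adjust the URL for a hosted Space)
client = Client("http://127.0.0.1:7860/")

answer = client.predict(
    "當歸的性味與歸經是什麼?",  # example question: properties, flavor, and meridian tropism of Angelica sinensis
    api_name="/chat"  # default endpoint name for gr.ChatInterface (assumption)
)
print(answer)
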
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ transformers>=4.46.0
+ accelerate>=0.26.0
+ gradio>=4.0.0
+ peft>=0.7.0
+ scipy
+ bitsandbytes
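
To reproduce locally (standard Gradio setup, not spelled out in this commit): install the dependencies with pip install -r requirements.txt, then start the server with python app.py; it listens on 0.0.0.0:7860. With 4-bit NF4 weights, the 7B base model occupies roughly 7B × 0.5 bytes ≈ 3.5 GB of VRAM before activations and KV cache, which is why the quantized load fits comfortably on a 16 GB T4.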