aciang committed on
Commit 272b97f · verified · 1 Parent(s): ce88c6f

Init/Update LanguageBridge Multimodal Chatbot Space

Files changed (3)
  1. README.md +9 -11
  2. app.py +140 -0
  3. requirements.txt +12 -0
README.md CHANGED
@@ -1,12 +1,10 @@
- ---
- title: LanguageBridge Mistral7B Multimodal Chat
- emoji: 📈
- colorFrom: yellow
- colorTo: red
- sdk: gradio
- sdk_version: 5.49.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # LanguageBridge — Multimodal Chatbot (Mistral-7B)
+
+ Text chat comes first and is stable; image/audio can be toggled with the USE_IMAGE/USE_AUDIO variables.
+
+ - Core model: `aciang/mistral7b-tk-sft-20251019-merged`
+ - To enable image/audio:
+   - Go to **Settings → Variables** and add:
+     - `USE_IMAGE=1`
+     - `USE_AUDIO=1`
+ - If you run into dependency conflicts, switch to **T4** or **A10G** under **Settings → Runtime**.
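If you prefer scripting over clicking through Settings, the same toggles can be set through the Hub API. This is a minimal sketch, assuming a recent `huggingface_hub` (the `add_space_variable` and `request_space_hardware` helpers) and a write token in `HF_TOKEN`; the Space repo id below is a hypothetical placeholder.

```python
# Minimal sketch: flip the multimodal toggles on the Space through the Hub API.
# Assumes a recent huggingface_hub and a write token exported as HF_TOKEN.
import os
from huggingface_hub import HfApi

api = HfApi(token=os.environ["HF_TOKEN"])
repo_id = "aciang/<this-space>"  # hypothetical placeholder for this Space's id

api.add_space_variable(repo_id, key="USE_IMAGE", value="1")
api.add_space_variable(repo_id, key="USE_AUDIO", value="1")

# Optionally request GPU hardware instead of switching it in Settings -> Runtime.
api.request_space_hardware(repo_id, hardware="t4-medium")
```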
app.py ADDED
@@ -0,0 +1,140 @@
+ import os
+ import torch
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ TITLE = os.getenv("SPACE_TITLE", "LanguageBridge — Multimodal Chatbot (Mistral-7B)")
+ MODEL_ID = os.getenv("MODEL_ID", "aciang/mistral7b-tk-sft-20251019-merged")
+ # Default system prompt (kept in Chinese to match the SFT data): roughly,
+ # "You are the LanguageBridge TA. Answer in bullet points, accurately,
+ # citing tribal knowledge and science; give step-by-step lists when needed."
+ SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT",
+     "你是語言橋助教,回答務必:條列、準確、可引用部落知識與科學。必要時給出步驟清單。")
+
+ # Overridable via Space Variables: USE_IMAGE=1 / USE_AUDIO=1
+ USE_IMAGE = os.getenv("USE_IMAGE", "0").lower() in ("1", "true")
+ USE_AUDIO = os.getenv("USE_AUDIO", "0").lower() in ("1", "true")
+
+ def load_llm():
+     # Prefer fp16 with automatic device placement; fall back to fp32 on CPU.
+     dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+     try:
+         model = AutoModelForCausalLM.from_pretrained(
+             MODEL_ID, torch_dtype=dtype, device_map="auto"
+         )
+     except Exception as e:
+         print(f"[Fallback CPU] load error: {e}")
+         model = AutoModelForCausalLM.from_pretrained(
+             MODEL_ID, torch_dtype=torch.float32, device_map=None
+         )
+     tok = AutoTokenizer.from_pretrained(MODEL_ID)
+     return tok, model
+
+ tokenizer, llm = load_llm()
+ llm.eval()
+
+ def lazy_load_captioner():
+     # Optional BLIP captioner; any import/load failure just disables images.
+     try:
+         from transformers import BlipProcessor, BlipForConditionalGeneration
+         cap_id = os.getenv("CAPTION_MODEL_ID", "Salesforce/blip-image-captioning-base")
+         proc = BlipProcessor.from_pretrained(cap_id)
+         vmod = BlipForConditionalGeneration.from_pretrained(
+             cap_id,
+             torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+         )
+         if torch.cuda.is_available():
+             vmod = vmod.to("cuda")
+         return proc, vmod
+     except Exception as e:
+         print(f"[Image OFF] {e}")
+         return None, None
+
+ CAP_PROC, CAP_MOD = (None, None)
+ if USE_IMAGE:
+     CAP_PROC, CAP_MOD = lazy_load_captioner()
+
+ def lazy_load_asr():
+     # Optional Whisper ASR; requires the openai-whisper package.
+     try:
+         import whisper
+         asr_id = os.getenv("ASR_MODEL_ID", "tiny")
+         return whisper.load_model(asr_id)
+     except Exception as e:
+         print(f"[Audio OFF] {e}")
+         return None
+
+ ASR = lazy_load_asr() if USE_AUDIO else None
+
+ @torch.inference_mode()
+ def generate_reply(history, image, audio, max_new_tokens, temperature, top_p):
+     user_parts = []
+
+     if image is not None and USE_IMAGE and CAP_PROC and CAP_MOD:
+         try:
+             from PIL import Image
+             im = Image.open(image).convert("RGB")
+             inputs = CAP_PROC(im, return_tensors="pt").to(CAP_MOD.device)
+             # Match the captioner's dtype (fp16 on GPU) for floating inputs.
+             inputs = {k: (v.to(CAP_MOD.dtype) if v.is_floating_point() else v)
+                       for k, v in inputs.items()}
+             out = CAP_MOD.generate(**inputs, max_new_tokens=64)
+             cap = CAP_PROC.decode(out[0], skip_special_tokens=True)
+             user_parts.append(f"[影像描述] {cap}")  # "[image caption]"
+         except Exception as e:
+             user_parts.append(f"[影像處理失敗: {e}]")  # "[image processing failed]"
+
+     if audio is not None and USE_AUDIO and ASR is not None:
+         try:
+             result = ASR.transcribe(audio, fp16=torch.cuda.is_available())
+             user_parts.append(f"[語音辨識] {result.get('text', '')}")  # "[transcript]"
+         except Exception as e:
+             user_parts.append(f"[語音處理失敗: {e}]")  # "[audio processing failed]"
+
+     # Pull the most recent user turn from the messages-format history.
+     text = ""
+     for msg in reversed(history or []):
+         if msg.get("role") == "user":
+             text = msg.get("content", "")
+             break
+     if user_parts:
+         text = (text + "\n" if text else "") + "\n".join(user_parts)
+
+     # The 使用者/助教 ("User"/"TA") markers follow the model's SFT format.
+     prompt = f"{SYSTEM_PROMPT}\n\n使用者:{text}\n助教:"
+     inputs = tokenizer(prompt, return_tensors="pt").to(llm.device)
+     out = llm.generate(
+         **inputs,
+         max_new_tokens=int(max_new_tokens),
+         temperature=float(temperature),
+         top_p=float(top_p),
+         do_sample=True,
+         eos_token_id=tokenizer.eos_token_id,
+         pad_token_id=tokenizer.eos_token_id,
+     )
+     ans = tokenizer.decode(out[0], skip_special_tokens=True)
+     if "助教:" in ans:
+         ans = ans.split("助教:", 1)[-1].strip()
+     return ans
+
+ with gr.Blocks(title=TITLE, fill_height=True) as demo:
+     gr.Markdown(
+         f"## {TITLE}\n- Core model: `{MODEL_ID}`\n"
+         "- Image/audio are off by default (enable via Space Variables: `USE_IMAGE=1` / `USE_AUDIO=1`)"
+     )
+     with gr.Row():
+         chat = gr.Chatbot(height=450, type="messages", show_copy_button=True)
+         with gr.Column(scale=0):
+             user_txt = gr.Textbox(label="Your question / instruction", placeholder="Type here…", interactive=True)
+             img = gr.Image(label="(Optional) Upload an image", type="filepath", visible=bool(USE_IMAGE), interactive=True)
+             aud = gr.Audio(label="(Optional) Upload audio", type="filepath", sources=["upload", "microphone"], visible=bool(USE_AUDIO), interactive=True)
+             mx = gr.Slider(64, 1024, value=512, step=32, label="max_new_tokens")
+             tp = gr.Slider(0.1, 1.2, value=0.6, step=0.05, label="temperature")
+             top = gr.Slider(0.5, 1.0, value=0.95, step=0.01, label="top_p")
+             btn = gr.Button("Send 🚀", variant="primary")
+             clr = gr.Button("Clear chat")
+
+     def respond(history, text, image, audio, mx, tp, top):
+         # gr.Chatbot(type="messages") expects {"role": ..., "content": ...} dicts.
+         history = history or []
+         history.append({"role": "user", "content": text})
+         try:
+             ans = generate_reply(history, image, audio, mx, tp, top)
+         except Exception as e:
+             ans = f"(Inference failed: {e})"
+         history.append({"role": "assistant", "content": ans})
+         return history, ""
+
+     btn.click(respond, inputs=[chat, user_txt, img, aud, mx, tp, top], outputs=[chat, user_txt])
+     clr.click(lambda: ([], ""), outputs=[chat, user_txt])
+
+ try:
+     demo.queue().launch(share=False, show_error=True, show_api=False)
+ except Exception as e:
+     print(f"[local launch failed; retrying with share=True] {e}")
+     demo.queue().launch(share=True, show_error=True, show_api=False)
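Before debugging inside the Space, the generation path can be exercised locally with just `transformers`. A minimal sketch of the same 使用者/助教 prompt format that app.py builds, assuming the merged checkpoint is accessible and fits in memory; the example question is hypothetical.

```python
# Minimal local smoke test of the prompt format used in app.py.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_ID = "aciang/mistral7b-tk-sft-20251019-merged"
tok = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
)

system = "你是語言橋助教,回答務必:條列、準確、可引用部落知識與科學。必要時給出步驟清單。"
prompt = f"{system}\n\n使用者:請自我介紹。\n助教:"  # "User: Please introduce yourself."

inputs = tok(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128, do_sample=True,
                     temperature=0.6, top_p=0.95,
                     pad_token_id=tok.eos_token_id)
# Strip everything up to the assistant marker, as the app does.
print(tok.decode(out[0], skip_special_tokens=True).split("助教:", 1)[-1].strip())
```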
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ gradio>=4.44.0
+ transformers>=4.44.0
+ accelerate>=0.31.0
+ bitsandbytes
+ torch
+ huggingface_hub
+ # To enable image/audio, set USE_IMAGE/USE_AUDIO=1 in the Space Variables,
+ # then uncomment the dependencies below as needed (or switch the Runtime to T4/A10G).
+ # pillow
+ # openai-whisper   # required for the `import whisper` in app.py
+ # torchaudio
+ # soundfile
+ # ffmpeg-python
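Since the app degrades silently when an optional import fails, a quick pre-flight check can confirm the extras are installed before setting USE_IMAGE/USE_AUDIO. A minimal sketch; the module-to-package mapping is the usual one for these libraries but is listed here as an assumption.

```python
# Pre-flight check: probe the optional multimodal imports the app relies on.
import importlib

# module name -> pip package name (assumed standard mapping)
optional = {
    "PIL": "pillow",              # image input / BLIP captioning
    "whisper": "openai-whisper",  # audio transcription
    "torchaudio": "torchaudio",
    "soundfile": "soundfile",
}

for module, package in optional.items():
    try:
        importlib.import_module(module)
        print(f"[OK] {module}")
    except ImportError:
        print(f"[MISSING] {module}: uncomment '{package}' in requirements.txt")
```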