badanwang committed
Commit 7f2c7f6 · 1 Parent(s): 4e54d22

Initial commit with app code only

Files changed (3):
  1. README.md +2 -1
  2. app.py +104 -34
  3. requirements.txt +5 -1
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Teacher Basic Qwen3-0.6b
+title: Teacher Model Api
 emoji: 💬
 colorFrom: yellow
 colorTo: purple
@@ -7,6 +7,7 @@ sdk: gradio
 sdk_version: 5.0.1
 app_file: app.py
 pinned: false
+short_description: A text classification model trained on qwen3-0.6b (called via API)
 ---
 
 An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
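
For reference, the Space front matter after this commit reads roughly as follows; line 6 (`sdk: gradio`) is recovered from the hunk context, everything else is taken verbatim from the diff, with the `short_description` translated from Chinese:

```yaml
---
title: Teacher Model Api
emoji: 💬
colorFrom: yellow
colorTo: purple
sdk: gradio
sdk_version: 5.0.1
app_file: app.py
pinned: false
short_description: A text classification model trained on qwen3-0.6b (called via API)
---
```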
app.py CHANGED
@@ -1,64 +1,134 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+import torch
+from threading import Thread
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+# --- Configure your local model path ---
+MODEL_PATH = "badanwang/teacher_basic_qwen3-0.6b"  # points to the folder containing the model files
 
+# --- Load the model and tokenizer ---
+# Make sure your model and tokenizer are compatible with your task (e.g. text generation / chat).
+# If your Space has a GPU, the model will try to use it automatically (provided transformers and torch are configured correctly).
+try:
+    print(f"Loading tokenizer from: {MODEL_PATH}")
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+    print(f"Loading model from: {MODEL_PATH}")
+    # With limited memory, or to run on CPU, add device_map="auto" or device_map="cpu".
+    # For larger models, device_map="auto" may require the accelerate library.
+    model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="cpu")  # "auto" will try to use the GPU
+    print("Model and tokenizer loaded successfully.")
+except Exception as e:
+    print(f"Error loading model or tokenizer: {e}")
+    # You could raise here, or set a flag so the error can be surfaced in the Gradio UI.
+    tokenizer = None
+    model = None
 
 def respond(
-    message,
+    message: str,
     history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
+    system_message: str,
+    max_tokens: int,
+    temperature: float,
+    top_p: float,
 ):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
+    if model is None or tokenizer is None:
+        yield "Error: Model or tokenizer not loaded. Please check the logs."
+        return
+
+    # Make sure the tokenizer has a pad_token_id; reusing eos_token_id is a common setup.
+    if tokenizer.pad_token_id is None:
+        print("tokenizer.pad_token_id is None. Setting it to tokenizer.eos_token_id.")
+        tokenizer.pad_token_id = tokenizer.eos_token_id
 
+    messages = [{"role": "system", "content": system_message}]
+    for user_msg, assistant_msg in history:
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
+        if assistant_msg:
+            messages.append({"role": "assistant", "content": assistant_msg})
     messages.append({"role": "user", "content": message})
 
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+    try:
+        # Step 1: use the chat template to build the full prompt string.
+        # tokenize=False means we get the formatted string first.
+        prompt_string = tokenizer.apply_chat_template(
+            messages,
+            tokenize=False,
+            add_generation_prompt=True
+        )
+
+        # Step 2: tokenize the formatted string to get input_ids and attention_mask.
+        # Truncation is good practice here; it is not strictly required for
+        # single-sequence generation, but it matters if the model expects fixed
+        # lengths or batching. For a single sequence, the attention_mask is
+        # usually all ones (up to truncation).
+        inputs_dict = tokenizer(
+            prompt_string,
+            return_tensors="pt",
+            padding=False,  # padding is usually unnecessary for single-sequence generation unless the model requires it
+            truncation=True,
+            max_length=tokenizer.model_max_length if hasattr(tokenizer, 'model_max_length') else 2048  # use a reasonable maximum length
+        )
+
+        input_ids = inputs_dict.input_ids.to(model.device)
+        attention_mask = inputs_dict.attention_mask.to(model.device)  # <--- grab the attention_mask
+
+    except Exception as e:
+        print(f"Error tokenizing chat or applying template: {e}")
+        yield f"Error during input processing: {str(e)}"
+        return
+
+    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+
+    generation_kwargs = dict(
+        input_ids=input_ids,  # <--- use input_ids
+        attention_mask=attention_mask,  # <--- pass the attention_mask
+        streamer=streamer,
+        max_new_tokens=max_tokens,
+        do_sample=True,
         temperature=temperature,
         top_p=top_p,
+        pad_token_id=tokenizer.pad_token_id  # pass pad_token_id explicitly
+    )
+    # Sampling requires temperature > 0.
+    if temperature <= 0:
+        generation_kwargs["do_sample"] = False  # disable sampling when temperature is zero or negative
 
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+
+    response = ""
+    try:
+        for new_text in streamer:
+            if new_text is not None:  # make sure new_text is not None
+                response += new_text
+                yield response
+    except Exception as e:
+        print(f"Error during streaming response: {e}")
+        yield "Error: Could not generate response stream."
 
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Textbox(value="You are a helpful AI assistant.", label="System message"),  # default system message
+        gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),  # adjusted range and default
        gr.Slider(
-            minimum=0.1,
+            minimum=0.01,  # top_p is normally never 0
             maximum=1.0,
             value=0.95,
-            step=0.05,
+            step=0.01,  # finer adjustment granularity
             label="Top-p (nucleus sampling)",
         ),
     ],
+    title="My Local Chatbot",
+    description="Chat with a model running locally in this Hugging Face Space.",
 )
 
 
 if __name__ == "__main__":
-    demo.launch()
+    if model is None or tokenizer is None:
+        print("Cannot launch Gradio app because model or tokenizer failed to load.")
+    else:
+        demo.launch()  # on Hugging Face Spaces, share=True is not needed
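
The heart of the new app.py is the threaded streaming pattern: `model.generate` blocks until generation finishes, so it runs on a worker thread and pushes decoded text through a `TextIteratorStreamer` that the main thread (here, Gradio's generator) consumes. A minimal standalone sketch of that pattern follows; the model id is illustrative (the Space itself loads badanwang/teacher_basic_qwen3-0.6b):

```python
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "Qwen/Qwen3-0.6B"  # illustrative; swap in your own checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cpu")

# Build a chat-templated prompt; tokenize=True + return_tensors="pt" gives input_ids directly.
messages = [{"role": "user", "content": "Say hello in one sentence."}]
input_ids = tokenizer.apply_chat_template(
    messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# skip_prompt=True keeps the echoed prompt out of the stream.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs on a worker thread while the main thread
# consumes decoded text chunks from the streamer iterator.
thread = Thread(
    target=model.generate,
    kwargs=dict(input_ids=input_ids, max_new_tokens=64, streamer=streamer),
)
thread.start()

for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()
```

In the Space, the loop body accumulates chunks into `response` and `yield`s the running total, which is exactly the contract `gr.ChatInterface` expects from a streaming generator.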
requirements.txt CHANGED
@@ -1 +1,5 @@
-huggingface_hub==0.25.2
+huggingface_hub==0.25.2
+torch
+transformers
+gradio
+accelerate
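
Note that only huggingface_hub is pinned; torch, transformers, gradio, and accelerate will resolve to whatever is current at build time. For reproducible Space builds it can be worth pinning those as well, along the lines of the sketch below (version numbers are illustrative, not taken from this commit, except gradio, which the README already fixes at 5.0.1 via sdk_version):

```text
huggingface_hub==0.25.2
torch==2.4.0
transformers==4.44.2
gradio==5.0.1
accelerate==0.34.2
```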