fiewolf1000 committed
Commit ec8e47c · verified · 1 Parent(s): 1b4691b

Update inference_node.py

Files changed (1)
  1. inference_node.py +43 -30
inference_node.py CHANGED
@@ -13,91 +13,105 @@ from transformers import (
 # 1. Basic configuration
 logging.basicConfig(level=logging.INFO, format="%(asctime)s-%(name)s-%(levelname)s-%(message)s")
 logger = logging.getLogger("inference_node_deepseek")
-app = FastAPI(title="Inference Node Service (Deepseek-8B)")
+app = FastAPI(title="Inference Node Service (DeepSeek-Coder-V2)")
 
-# 2. Model configuration (replaced with the Deepseek-8B-Instruct model)
-# Deepseek-8B excels at math reasoning and coding tasks, a good fit for business scenarios
-MODEL_NAME = os.getenv("MODEL_NAME", "deepseek-ai/deepseek-llm-8b-instruct")
-HF_TOKEN = os.getenv("HUGGINGFACE_HUB_TOKEN")  # public model, no token required
+# 2. Model configuration: use a DeepSeek model that is publicly available on Hugging Face
+# Correct ID: deepseek-ai/deepseek-coder-v2 (code model, public, no token needed)
+MODEL_NAME = os.getenv("MODEL_NAME", "deepseek-ai/deepseek-coder-v2")
+HF_TOKEN = os.getenv("HUGGINGFACE_HUB_TOKEN")  # public model, may be left empty
 
-# 3. 4-bit quantization config (fits in 16 GB of memory, tuned for Deepseek performance)
+# 3. 4-bit quantization config (fits in 16 GB of memory, tuned for DeepSeek)
 bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
     bnb_4bit_use_double_quant=True,
     bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.float16  # float16 is the compute dtype that suits Deepseek best
+    bnb_4bit_compute_dtype=torch.float16  # lowers VRAM usage, works well with DeepSeek
 )
 
-# 4. Load the model
+# 4. Load the DeepSeek model (make sure the ID is correct)
 try:
     logger.info(f"Loading model: {MODEL_NAME} (4-bit quantized)")
+    # Load the tokenizer (DeepSeek-Coder-specific configuration)
     tokenizer = AutoTokenizer.from_pretrained(
         MODEL_NAME,
         token=HF_TOKEN,
         padding_side="right",
-        trust_remote_code=True  # Deepseek requires trusting remote code
+        trust_remote_code=True  # DeepSeek models ship custom code that must be loaded
     )
-    # Set pad_token (Deepseek has no pad_token by default)
+    # Set pad_token manually (DeepSeek has none by default; avoids generation warnings)
     if tokenizer.pad_token is None:
         tokenizer.pad_token = tokenizer.eos_token
 
+    # Load the quantized model
     model = AutoModelForCausalLM.from_pretrained(
         MODEL_NAME,
         quantization_config=bnb_config,
-        device_map="auto",
+        device_map="auto",  # assign layers to GPU/CPU automatically
         token=HF_TOKEN,
         trust_remote_code=True,
        torch_dtype=torch.float16
     )
-    # Streamer configuration (keep the required special tokens)
+    # Streamer (keep the special tokens that code formatting relies on)
     streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=False)
     logger.info(f"Model {MODEL_NAME} loaded successfully! VRAM usage roughly 5-6 GB (4-bit quantized)")
 except Exception as e:
     logger.error(f"Model load failed: {str(e)}", exc_info=True)
     raise SystemExit(f"Service terminated: {str(e)}")
 
-# 5. Request model
+# 5. Request model (lets callers specify a programming language for code generation)
 class NodeInferenceRequest(BaseModel):
-    prompt: str
+    prompt: str  # the coding request (e.g. "write quicksort in Python")
+    language: str = "python"  # optional: target programming language
     max_tokens: int = 1024
 
-# 6. Streaming inference endpoint (Deepseek chat format)
+# 6. Streaming inference endpoint (DeepSeek-Coder prompt format)
 @app.post("/node/stream-infer")
 async def stream_infer(req: NodeInferenceRequest, request: Request):
     try:
-        # Deepseek expects the chat format [{"role": "user", "content": "..."}]
-        # No extra system prompt is needed; the model handles it automatically
-        inputs = tokenizer.apply_chat_template(
-            [{"role": "user", "content": req.prompt}],
-            tokenize=True,
-            add_generation_prompt=True,
-            return_tensors="pt"
+        # Key point: DeepSeek-Coder code-generation prompt (naming the language improves accuracy)
+        code_prompt = f"""You are a professional code assistant. Write clean, runnable code for the following requirement.
+Programming Language: {req.language}
+Requirement: {req.prompt}
+Code (with comments):
+"""
+        # Build inputs with the standard tokenizer call (avoids build_chat_input compatibility issues)
+        inputs = tokenizer(
+            code_prompt,
+            return_tensors="pt",
+            truncation=True,
+            max_length=2048  # cap the input length, leaving room for generation
         ).to(model.device)
 
+        # Async generator
         async def generate_chunks():
            loop = asyncio.get_running_loop()
+            # Run DeepSeek-Coder generation (a low temperature keeps the syntax correct)
             outputs = await loop.run_in_executor(
                 None,
                 lambda: model.generate(
-                    inputs,
+                    **inputs,
                     streamer=streamer,
                     max_new_tokens=req.max_tokens,
                     do_sample=True,
-                    temperature=0.7,
+                    temperature=0.2,  # low temperature (0.2-0.4) for code generation, avoids syntax errors
                     top_p=0.95,
                     pad_token_id=tokenizer.pad_token_id,
                     eos_token_id=tokenizer.eos_token_id
                 )
             )
 
-            # Keep only the generated part (drop the input part)
-            generated_tokens = outputs[0][len(inputs[0]):]
+            # Decode piece by piece (keep only the generated tokens, dropping the input prompt)
+            generated_tokens = outputs[0][len(inputs["input_ids"][0]):]
             for token in generated_tokens:
                 if await request.is_disconnected():
+                    logger.info("Client disconnected; stopping generation")
                     break
+                # Decode the token (preserving code formatting)
                 token_text = tokenizer.decode(token, skip_special_tokens=True)
+                # JSON escaping (preserves double quotes and newlines inside the code)
                 escaped_text = token_text.replace('"', '\\"').replace('\n', '\\n')
                 yield '{{"chunk":"{}","finish":false}}\n'.format(escaped_text)
+            # End-of-generation marker
             yield '{"chunk":"","finish":true}\n'
 
         return StreamingResponse(generate_chunks(), media_type="application/x-ndjson")
@@ -107,17 +121,16 @@ async def stream_infer(req: NodeInferenceRequest, request: Request):
         logger.error(error_msg, exc_info=True)
         raise HTTPException(status_code=500, detail=error_msg)
 
-# 7. Health check
+# 7. Health check (confirms the model loaded correctly)
 @app.get("/node/health")
 async def node_health():
     return {
         "status": "healthy",
         "model": MODEL_NAME,
         "support_stream": True,
-        "note": "Deepseek-8B, 4-bit quantized, fits in 16 GB of memory, strong at math reasoning and coding tasks"
+        "note": "DeepSeek-Coder-V2, 4-bit quantized, fits in 16 GB of memory, strong at Python/C++/Java code generation"
     }
 
 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=7860, log_level="info")
-
+    uvicorn.run(app, host="0.0.0.0", port=7860, log_level="info")
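One caveat on the streaming loop in this diff: the manual escaping only handles double quotes and newlines, so generated code containing backslashes, tabs, or other control characters can still produce malformed NDJSON lines. A minimal sketch of a safer alternative; the helper name ndjson_line is hypothetical, not part of this commit:

import json

def ndjson_line(chunk: str, finish: bool = False) -> str:
    # json.dumps escapes quotes, backslashes, newlines and control
    # characters, so every emitted line is guaranteed-valid JSON.
    return json.dumps({"chunk": chunk, "finish": finish}, ensure_ascii=False) + "\n"

# Inside generate_chunks() this would replace the manual escaping:
#     yield ndjson_line(token_text)
# and, after the loop:
#     yield ndjson_line("", finish=True)

Delegating to json.dumps keeps the wire format identical for well-behaved text while guaranteeing every line stays parseable.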
 
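For reference, a minimal client sketch for the /node/stream-infer endpoint, assuming the service is reachable at http://localhost:7860 and that each response line parses as JSON; the host, port, prompt, and use of the requests library are illustrative, not part of this commit:

import json
import requests

# Stream a code-generation request from the node (illustrative host/port).
resp = requests.post(
    "http://localhost:7860/node/stream-infer",
    json={"prompt": "write quicksort", "language": "python", "max_tokens": 512},
    stream=True,
)
resp.raise_for_status()

for line in resp.iter_lines(decode_unicode=True):
    if not line:
        continue  # skip blank keep-alive lines
    msg = json.loads(line)  # one NDJSON object per line
    if msg["finish"]:
        break
    print(msg["chunk"], end="", flush=True)

Each response line is a standalone JSON object, so the client can render chunks as they arrive and stop cleanly on the finish marker.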