Rfym21 committed
Commit c85e8a1 · verified · 1 Parent(s): 9a48cbc

Delete utils.py

Files changed (1)
  1. utils.py +0 -105
utils.py DELETED
@@ -1,105 +0,0 @@
- import json
- import uuid
- import time
- import tiktoken
- from constants import CHAT_COMPLETION_CHUNK, CONTENT_TYPE_EVENT_STREAM
- from flask import jsonify
-
- def generate_system_fingerprint():
-     """Generate and return a unique system fingerprint."""
-     return f"fp_{uuid.uuid4().hex[:10]}"
-
- def create_openai_chunk(content, model, finish_reason=None, usage=None):
-     """Create a formatted OpenAI response chunk."""
-     chunk = {
-         "id": f"chatcmpl-{uuid.uuid4()}",
-         "object": CHAT_COMPLETION_CHUNK,
-         "created": int(time.time()),
-         "model": model,
-         "system_fingerprint": generate_system_fingerprint(),
-         "choices": [
-             {
-                 "index": 0,
-                 "delta": {"content": content} if content else {},
-                 "logprobs": None,
-                 "finish_reason": finish_reason
-             }
-         ]
-     }
-     if usage is not None:
-         chunk["usage"] = usage
-     return chunk
-
- def count_tokens(text, model="gpt-3.5-turbo-0301"):
-     """Count the number of tokens in the given text."""
-     try:
-         return len(tiktoken.encoding_for_model(model).encode(text))
-     except KeyError:
-         # Unknown model name: fall back to the cl100k_base encoding.
-         return len(tiktoken.get_encoding("cl100k_base").encode(text))
-
- def count_message_tokens(messages, model="gpt-3.5-turbo-0301"):
-     """Count the total number of tokens across a list of messages."""
-     return sum(count_tokens(str(message), model) for message in messages)
-
- def stream_notdiamond_response(response, model):
-     """Stream the notdiamond API response as OpenAI-style chunks."""
-     for chunk in response.iter_content(1024):
-         if chunk:
-             # errors='replace' keeps the stream alive when a multi-byte
-             # character is split across chunk boundaries.
-             text = chunk.decode('utf-8', errors='replace')
-             yield create_openai_chunk(text, model)
-
-     yield create_openai_chunk('', model, 'stop')
-
- def handle_non_stream_response(response, model, prompt_tokens):
-     """Handle a non-streaming API response and build the final JSON body."""
-     full_content = ""
-
-     for chunk in stream_notdiamond_response(response, model):
-         if chunk['choices'][0]['delta'].get('content'):
-             full_content += chunk['choices'][0]['delta']['content']
-
-     completion_tokens = count_tokens(full_content, model)
-     total_tokens = prompt_tokens + completion_tokens
-
-     return jsonify({
-         "id": f"chatcmpl-{uuid.uuid4()}",
-         "object": "chat.completion",
-         "created": int(time.time()),
-         "model": model,
-         "system_fingerprint": generate_system_fingerprint(),
-         "choices": [
-             {
-                 "index": 0,
-                 "message": {
-                     "role": "assistant",
-                     "content": full_content
-                 },
-                 "finish_reason": "stop"
-             }
-         ],
-         "usage": {
-             "prompt_tokens": prompt_tokens,
-             "completion_tokens": completion_tokens,
-             "total_tokens": total_tokens
-         }
-     })
-
- def generate_stream_response(response, model, prompt_tokens):
-     """Generate a streaming HTTP response (server-sent events)."""
-     total_completion_tokens = 0
-
-     for chunk in stream_notdiamond_response(response, model):
-         content = chunk['choices'][0]['delta'].get('content', '')
-         total_completion_tokens += count_tokens(content, model)
-
-         chunk['usage'] = {
-             "prompt_tokens": prompt_tokens,
-             "completion_tokens": total_completion_tokens,
-             "total_tokens": prompt_tokens + total_completion_tokens
-         }
-
-         yield f"data: {json.dumps(chunk)}\n\n"
-
-     yield "data: [DONE]\n\n"