import copy
import json
import time


class OpenaiStreamOutputer:
    """Formats chat-completion chunks in the OpenAI streaming API shape.

    Each call to `output()` builds one `chat.completion.chunk` payload
    (choices + usage) and serializes it to a JSON string via
    `data_to_string()`.
    """

    def __init__(self):
        # Template payload for every chunk. It is deep-copied per call in
        # `output()` so the nested "usage" dict is never mutated in place.
        current_time = int(time.time())
        self.default_data = {
            "id": "chatcmpl-hugginface",
            "object": "chat.completion.chunk",
            "created": current_time,
            "model": "hugginface",
            "system_fingerprint": "fp_44709d6fcb",
            "choices": [],
            "usage": {
                "prompt_tokens": 0,
                "completion_tokens": 0,
                "total_tokens": 0,
            },
        }

    def data_to_string(self, data=None, content_type=""):
        """Serialize `data` to a JSON string.

        `content_type` is accepted for interface compatibility but does not
        affect serialization.
        """
        # None (not a mutable `{}` default) to avoid shared-state bugs;
        # behavior for the no-argument call is unchanged (serializes "{}").
        return json.dumps(data if data is not None else {})

    def output(self, content=None, content_type="Completions", tokens_count=0) -> str:
        """Build one streaming chunk for `content` and return it as JSON.

        Args:
            content: Text for this chunk; None is treated as empty for the
                token count.
            content_type: "Role" (opening role chunk), "Completions" /
                "OpenaiAPI" (content chunk), "Finished" (terminal chunk),
                or anything else (plain content chunk, not finished).
            tokens_count: Prompt-token count recorded in "usage".

        Returns:
            JSON string of the chunk payload.
        """
        # Deep copy: a shallow .copy() would share the nested "usage" dict,
        # so the "+=" updates below would accumulate across calls and
        # silently corrupt self.default_data.
        data = copy.deepcopy(self.default_data)
        if content_type == "Role":
            data["choices"] = [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": content,
                    },
                    "logprobs": None,
                    "finish_reason": "stop",
                }
            ]
        elif content_type in [
            "Completions",
            "OpenaiAPI",
        ]:
            # NOTE(review): the diffed source labels these chunks with role
            # "user"; confirm this is intended ("assistant" is the usual
            # role for model output chunks).
            data["choices"] = [
                {
                    "index": 0,
                    "message": {
                        "role": "user",
                        "content": content,
                    },
                    "logprobs": None,
                    "finish_reason": None,
                }
            ]
        elif content_type == "Finished":
            data["choices"] = [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": content,
                    },
                    "logprobs": None,
                    "finish_reason": "stop",
                }
            ]
        else:
            data["choices"] = [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": content,
                    },
                    "logprobs": None,
                    "finish_reason": None,
                }
            ]

        # Update token counts. Guard against content=None (the default),
        # which would make content.split() raise TypeError.
        data["usage"]["prompt_tokens"] += tokens_count
        data["usage"]["completion_tokens"] += len((content or "").split())
        data["usage"]["total_tokens"] = (
            data["usage"]["prompt_tokens"] + data["usage"]["completion_tokens"]
        )

        return self.data_to_string(data, content_type)