Commit: Update messagers/message_outputer.py (+65 lines, -19 lines; one file changed)
|
@@ -8,10 +8,10 @@ class OpenaiStreamOutputer:
|
|
| 8 |
|
| 9 |
def __init__(self):
|
| 10 |
self.default_data = {
|
| 11 |
-
"
|
| 12 |
-
"id": "chatcmpl-hugginface",
|
| 13 |
"object": "chat.completion",
|
| 14 |
-
"
|
|
|
|
| 15 |
"system_fingerprint": "fp_44709d6fcb",
|
| 16 |
"choices": [],
|
| 17 |
"usage": {
|
|
@@ -21,25 +21,71 @@ class OpenaiStreamOutputer:
|
|
| 21 |
}
|
| 22 |
}
|
| 23 |
|
| 24 |
-
def data_to_string(self, data={}):
|
| 25 |
data_str = f"{json.dumps(data)}"
|
| 26 |
return data_str
|
| 27 |
|
| 28 |
-
def output(self, content=None,
|
| 29 |
data = self.default_data.copy()
|
| 30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
|
| 32 |
-
|
| 33 |
-
|
|
|
|
|
|
|
| 34 |
|
| 35 |
-
data
|
| 36 |
-
{
|
| 37 |
-
"index": 0,
|
| 38 |
-
"message": message,
|
| 39 |
-
"logprobs": None,
|
| 40 |
-
"finish_reason": "stop" if content_type == "Finished" else None
|
| 41 |
-
}
|
| 42 |
-
]
|
| 43 |
-
|
| 44 |
-
data["usage"] = tokens_info
|
| 45 |
-
return self.data_to_string(data)
|
|
|
|
| 8 |
|
| 9 |
def __init__(self):
|
| 10 |
self.default_data = {
|
| 11 |
+
"id": "chatcmpl-123",
|
|
|
|
| 12 |
"object": "chat.completion",
|
| 13 |
+
"created": 1677652288,
|
| 14 |
+
"model": "gpt-3.5-turbo-0613",
|
| 15 |
"system_fingerprint": "fp_44709d6fcb",
|
| 16 |
"choices": [],
|
| 17 |
"usage": {
|
|
|
|
| 21 |
}
|
| 22 |
}
|
| 23 |
|
| 24 |
+
def data_to_string(self, data=None, content_type=""):
    """Serialize *data* to a JSON string.

    Args:
        data: Mapping to serialize. Defaults to an empty dict, so the
            no-argument call still yields ``"{}"``.
        content_type: Unused here; accepted for interface compatibility
            with callers that pass the message type through.

    Returns:
        str: The ``json.dumps`` serialization of *data*.
    """
    # `data=None` instead of the original `data={}`: a mutable default
    # argument is shared across calls and is a classic Python pitfall.
    if data is None:
        data = {}
    # json.dumps already returns str; the original f-string wrapper
    # (f"{json.dumps(data)}") added nothing.
    return json.dumps(data)
|
| 27 |
|
| 28 |
+
def output(self, content=None, content_type="Completions", tokens_count=0) -> str:
    """Build one OpenAI-style chat-completion payload and serialize it.

    Args:
        content: Message text to embed in the single choice. May be None.
        content_type: One of "Role", "Completions", "InternalSearchQuery",
            "InternalSearchResult", "SuggestedResponses", "Finished", or
            anything else (treated as an in-progress assistant chunk).
        tokens_count: Prompt-token count to add to the usage block.

    Returns:
        str: JSON string produced by ``self.data_to_string``.
    """
    data = self.default_data.copy()
    # BUGFIX: .copy() is shallow, so data["usage"] aliased
    # self.default_data["usage"]; the += updates below then mutated the
    # shared defaults and token counts accumulated across every call.
    # Re-copy the nested dict so each call starts from the defaults.
    data["usage"] = dict(self.default_data["usage"])

    # Map the message type onto the role / finish_reason of the single
    # choice; the choice structure itself is identical in every branch.
    if content_type == "Role":
        role, finish_reason = "assistant", "stop"
    elif content_type in (
        "Completions",
        "InternalSearchQuery",
        "InternalSearchResult",
        "SuggestedResponses",
    ):
        if content_type in ("InternalSearchQuery", "InternalSearchResult"):
            # Search events get a trailing newline; guard against the
            # None default, which previously raised TypeError here.
            content = (content or "") + "\n"
        # NOTE(review): original code emits role "user" for these types
        # (not "assistant") — preserved as-is; verify against consumers.
        role, finish_reason = "user", None
    elif content_type == "Finished":
        role, finish_reason = "assistant", "stop"
    else:
        role, finish_reason = "assistant", None

    data["choices"] = [
        {
            "index": 0,
            "message": {
                "role": role,
                "content": content,
            },
            "logprobs": None,
            "finish_reason": finish_reason,
        }
    ]

    # Update token counts relative to the defaults for this call only.
    usage = data["usage"]
    usage["prompt_tokens"] += tokens_count
    # Whitespace word count as a rough completion-token proxy; guard the
    # None default, which previously raised AttributeError on .split().
    usage["completion_tokens"] += len(content.split()) if content else 0
    usage["total_tokens"] = usage["prompt_tokens"] + usage["completion_tokens"]

    return self.data_to_string(data, content_type)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|