Commit a1e3c35 · Ilke Ileri committed
Parent(s): a73c020

Fix: revert system prompt, add full OpenAI-compatible response format

app.py CHANGED

@@ -82,23 +82,8 @@ def chat_completions():
     if not prompt:
         return jsonify({"error": "No prompt provided"}), 400
 
-    #
-
-    - Sales techniques and strategies
-    - Handling objections (price, timing, competition)
-    - Closing deals
-    - Lead qualification
-    - Customer relationship management
-    - Sales processes and frameworks
-    - Wisemate's services and capabilities
-
-    If asked about unrelated topics (science, math, general knowledge, etc.), politely redirect:
-    "I'm here to help with sales and business-related questions about Wisemate. How can I assist you with your sales inquiries?"
-
-    Now respond to this sales-related question:"""
-
-    # Prompt in Gemma format (include the system prompt)
-    formatted_prompt = f"<start_of_turn>user\n{system_prompt}\n{prompt}<end_of_turn>\n<start_of_turn>model\n"
+    # Prompt in Gemma format
+    formatted_prompt = f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model\n"
 
     # Generate the model response
     inputs = tokenizer(formatted_prompt, return_tensors="pt")
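The revert above drops the Wisemate sales system prompt and returns to a bare single-turn Gemma prompt. The same turn markup can also be produced by the tokenizer's chat template; a minimal sketch, assuming a Gemma instruction-tuned checkpoint (the model id below is a placeholder, not necessarily the Space's MODEL_NAME):

# Sketch: Gemma turn markup via the chat template. google/gemma-2b-it is an
# assumption for illustration; app.py loads whatever MODEL_NAME points at.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
prompt = "How should I handle a price objection?"

# Manual construction, exactly as in the diff:
manual = f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model\n"

# Template-driven construction; Gemma's stock template also prepends <bos>:
templated = tokenizer.apply_chat_template(
    [{"role": "user", "content": prompt}],
    tokenize=False,
    add_generation_prompt=True,
)
assert templated.endswith(manual)
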
@@ -125,14 +110,25 @@ Now respond to this sales-related question:"""
     response_text = full_response.split("<start_of_turn>model\n")[-1]
     response_text = response_text.replace("<end_of_turn>", "").strip()
 
-    # Vapi
+    # OpenAI-compatible response format (fully Vapi-compatible)
     vapi_response = {
+        "id": "chatcmpl-" + str(hash(prompt))[-10:],
+        "object": "chat.completion",
+        "created": int(__import__('time').time()),
+        "model": MODEL_NAME,
         "choices": [{
+            "index": 0,
             "message": {
                 "role": "assistant",
                 "content": response_text
-            }
-        }]
+            },
+            "finish_reason": "stop"
+        }],
+        "usage": {
+            "prompt_tokens": len(inputs["input_ids"][0]),
+            "completion_tokens": len(outputs[0]) - len(inputs["input_ids"][0]),
+            "total_tokens": len(outputs[0])
+        }
     }
 
     return jsonify(vapi_response), 200
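The second hunk is the substance of the fix: the handler previously returned only choices[].message, which OpenAI-compatible consumers such as Vapi can reject, and it now fills in id, object, created, model, index, finish_reason, and usage. A rough client-side sketch of what the endpoint should return, with the caveat that the diff shows only the handler body, so the route path and request keys below are assumptions:

# Sketch: probing the Space for the fields this commit adds. The URL is a
# placeholder, and the request body shape ("messages" here) is an assumption;
# the request-parsing code is not part of this diff.
import requests

resp = requests.post(
    "https://your-space.hf.space/v1/chat/completions",  # hypothetical URL
    json={"messages": [{"role": "user", "content": "Help me qualify this lead."}]},
    timeout=120,
)
data = resp.json()
print(data["id"], data["object"], data["model"])  # chatcmpl-..., chat.completion, MODEL_NAME
print(data["choices"][0]["finish_reason"])        # "stop"
print(data["choices"][0]["message"]["content"])   # the generated reply
print(data["usage"])                              # prompt/completion/total token counts

One caveat worth noting: Python's built-in hash() is randomized per process, so the chatcmpl- id is only stable within a single run; uuid.uuid4() is the conventional alternative for generating these ids.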
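The usage math assumes decoder-only generation, where model.generate() returns the prompt ids concatenated with the newly generated ids in outputs[0], so the completion length is just the difference. A sketch with invented values:

# Sketch of the usage arithmetic: for decoder-only models such as Gemma,
# outputs[0] = prompt ids + generated ids, so subtracting the prompt length
# recovers the completion length. Numbers are illustrative only.
prompt_tokens = 12   # len(inputs["input_ids"][0])
total_tokens = 48    # len(outputs[0])
usage = {
    "prompt_tokens": prompt_tokens,
    "completion_tokens": total_tokens - prompt_tokens,
    "total_tokens": total_tokens,
}
assert usage == {"prompt_tokens": 12, "completion_tokens": 36, "total_tokens": 48}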