Commit ·
d59e3c0
1
Parent(s): 282b49d
- adding other LLMs
Browse files
Config_files/message_system_config.json
CHANGED
|
@@ -21,7 +21,7 @@
|
|
| 21 |
"AI_phrases_singeo": ["your voice deserves more"],
|
| 22 |
"header_limit": 30,
|
| 23 |
"message_limit": 110,
|
| 24 |
-
"LLM_models": ["gpt-4o-mini", "gpt-4o", "gpt-4.1-mini"],
|
| 25 |
"openai_models": ["gpt-4o-mini", "gpt-4o", "gpt-4.1-nano", "gpt-3.5-turbo", "gpt-4.1-mini"],
|
| 26 |
"reasoning": ["o1", "o4-mini", "o1-mini", "o3-mini"],
|
| 27 |
"ollama_models": ["deepseek-r1:1.5b", "gemma3:4b", "deepseek-r1:7b", "gemma3:4b"],
|
|
|
|
| 21 |
"AI_phrases_singeo": ["your voice deserves more"],
|
| 22 |
"header_limit": 30,
|
| 23 |
"message_limit": 110,
|
| 24 |
+
"LLM_models": ["gpt-4o-mini", "gpt-4o", "gpt-4.1-mini", "gemini-2.5-flash-lite-preview-06-17", "claude-3-5-haiku-latest", "google/gemma-3-27b-instruct/bf-16"],
|
| 25 |
"openai_models": ["gpt-4o-mini", "gpt-4o", "gpt-4.1-nano", "gpt-3.5-turbo", "gpt-4.1-mini"],
|
| 26 |
"reasoning": ["o1", "o4-mini", "o1-mini", "o3-mini"],
|
| 27 |
"ollama_models": ["deepseek-r1:1.5b", "gemma3:4b", "deepseek-r1:7b"],
|
Messaging_system/LLM.py
CHANGED
|
@@ -419,16 +419,39 @@ class LLM:
|
|
| 419 |
return None
|
| 420 |
|
| 421 |
# ===============================================================
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 422 |
def preprocess_and_parse_json_claude(self, response: str):
|
| 423 |
"""
|
| 424 |
-
|
| 425 |
-
:param response:
|
| 426 |
-
:return:
|
| 427 |
"""
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 432 |
|
| 433 |
|
| 434 |
|
|
|
|
| 419 |
return None
|
| 420 |
|
| 421 |
# ===============================================================
|
| 422 |
+
# def preprocess_and_parse_json_claude(self, response: str):
|
| 423 |
+
# """
|
| 424 |
+
# process claude response and extract JSON
|
| 425 |
+
# :param response:
|
| 426 |
+
# :return:
|
| 427 |
+
# """
|
| 428 |
+
# json_start = response.index("{")
|
| 429 |
+
# json_end = response.rfind("}")
|
| 430 |
+
# parsed_response = json.loads(response[json_start:json_end + 1])
|
| 431 |
+
# return parsed_response
|
| 432 |
+
#
|
| 433 |
+
|
| 434 |
def preprocess_and_parse_json_claude(self, response: str):
    """
    Process a Claude response and safely extract the JSON object it contains.

    :param response: raw model output expected to contain a JSON object,
        possibly surrounded by extra prose text
    :return: the parsed JSON payload as a dict
    :raises ValueError: if no JSON object is found, the JSON is malformed,
        or the parsed value is not a dict
    """
    json_string = None
    try:
        # str.index raises ValueError when "{" is absent (no JSON at all)
        json_start = response.index("{")
        json_end = response.rfind("}")
        json_string = response[json_start:json_end + 1]

        parsed_response = json.loads(json_string)

        if not isinstance(parsed_response, dict):
            raise ValueError(f"Parsed response is not a dict: {parsed_response}")

        return parsed_response

    # NOTE: json.JSONDecodeError subclasses ValueError, so it MUST be caught
    # first — with `except ValueError` listed first (as in the original),
    # this branch was unreachable and decode failures got the wrong message.
    except json.JSONDecodeError as je:
        raise ValueError(f"Failed to parse JSON from string: {json_string}\nError: {je}") from je
    except ValueError as ve:
        raise ValueError(f"Could not extract JSON from Claude response: {ve}\nOriginal response: {response}") from ve
|
| 454 |
+
|
| 455 |
|
| 456 |
|
| 457 |
|
Messaging_system/PromptGenerator.py
CHANGED
|
@@ -253,6 +253,7 @@ Goal: Make the recommendation feel personalized and casually relevant — not ge
|
|
| 253 |
|
| 254 |
instructions = f"""
|
| 255 |
### **Output instructions**:
|
|
|
|
| 256 |
|
| 257 |
**Expected output structure:**
|
| 258 |
|
|
|
|
| 253 |
|
| 254 |
instructions = f"""
|
| 255 |
### **Output instructions**:
|
| 256 |
+
- header < {self.Core.config_file["header_limit"]} and message < {self.Core.config_file["message_limit"]} characters.
|
| 257 |
|
| 258 |
**Expected output structure:**
|
| 259 |
|
messaging_main_test.py
CHANGED
|
@@ -194,7 +194,7 @@ Message: These lessons are curated just for you. Start singing today!
|
|
| 194 |
# o3-mini o1-mini o4-mini o1
|
| 195 |
|
| 196 |
users_message = permes.create_personalize_messages(session=session,
|
| 197 |
-
model="
|
| 198 |
users=users,
|
| 199 |
brand=brand,
|
| 200 |
config_file=config_file,
|
|
|
|
| 194 |
# o3-mini o1-mini o4-mini o1
|
| 195 |
|
| 196 |
users_message = permes.create_personalize_messages(session=session,
|
| 197 |
+
model="google/gemma-3-27b-instruct/bf-16",
|
| 198 |
users=users,
|
| 199 |
brand=brand,
|
| 200 |
config_file=config_file,
|