clean code
src/app.py  CHANGED  +1 -15
@@ -26,8 +26,6 @@ class Config:
         # Initialize LangSmith
         self.langsmith_client = Client(api_key=self.langsmith_api_key)
 
-        # print(f"OpenAI Api Key: {self.openai_api_key[:7]}")
-
 class FileReader:
     def __init__(self):
         self.linkedin_profile = ""
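Note on the first removal: even commented out, a line that prints `self.openai_api_key[:7]` is a credential-leak hazard if ever re-enabled. If key debugging is needed later, a masked form is safer; this helper is a sketch, not part of the app:

def mask_key(key: str) -> str:
    # Show only enough to identify the key; never print raw prefixes.
    return f"{key[:4]}...{key[-2:]}" if len(key) > 8 else "***"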
@@ -40,7 +38,6 @@ class FileReader:
         except Exception:
             # If file missing, keep empty
             self.linkedin_profile = ""
-        # NOT IMPLEMENTED ---> CREATE FILE AND CHANGE IN THE APP WHERE APPLICABLE
         try:
             with open("../me/additional_info.txt", "r", encoding="utf-8") as f:
                 self.additional_info = f.read()
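The dropped `# NOT IMPLEMENTED` marker pointed at the same concern the surrounding code already handles: optional files should fall back to an empty string. A minimal sketch of that pattern; `read_optional` is an illustrative name, not a helper in the app:

def read_optional(path: str, default: str = "") -> str:
    # Return the file contents, or the default if the file is absent.
    try:
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    except OSError:
        return default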
@@ -125,11 +122,6 @@ class MyProfileAvatarChat(Config, FileReader):
 
     @traceable(run_type="llm", name="RerunRejectedAnswer")
     def rerun(self, reply, message, history, feedback, **kwargs):
-        # updated_system_prompt = self.system_prompt + "\n\n## Previous answer rejected\n \
-        # You just tried to reply, but the quality control rejected your reply\n"
-        # updated_system_prompt += f"## Your attempted answer:\n{reply}\n\n"
-        # updated_system_prompt += f"## Reason for rejection:\n{feedback}\n\n"
-
         updated_system_prompt = (
             self.system_prompt
             + "\n\n## Previous answer rejected\n"
@@ -137,8 +129,6 @@ class MyProfileAvatarChat(Config, FileReader):
             + f"## Your attempted answer:\n{reply}\n\n"
             + f"## Reason for rejection:\n{feedback}\n\n"
         )
-
-
         messages = [{"role": "system", "content": updated_system_prompt}] + history + \
             [{"role": "user", "content": message}]
         try:
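For reference, the consolidated prompt builder in one piece. The hunks skip new line 128, so the middle sentence is inferred from the removed commented-out draft; this is a sketch of how the assembled string likely reads, not a verbatim copy:

def build_rejection_prompt(system_prompt: str, reply: str, feedback: str) -> str:
    # One parenthesized expression instead of the old commented-out
    # incremental += concatenation.
    return (
        system_prompt
        + "\n\n## Previous answer rejected\n"
        + "You just tried to reply, but the quality control rejected your reply\n"
        + f"## Your attempted answer:\n{reply}\n\n"
        + f"## Reason for rejection:\n{feedback}\n\n"
    )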
@@ -160,7 +150,6 @@ class MyProfileAvatarChat(Config, FileReader):
         Returns:
             reply string
         """
-
         # Cache exact-match short-circuit
         if message in (qa["question"] for qa in self.qa_cache):
             # exact match
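The membership test scans a generator of questions, and the `# exact match` branch presumably then fetches the matching answer. A sketch of that lookup, assuming `qa_cache` is a list of `{"question": ..., "answer": ...}` dicts as the diff implies; a dict keyed by question would make this O(1) instead of a scan:

def cached_answer(qa_cache: list[dict], message: str) -> str | None:
    # Return the cached answer for an exact question match, else None.
    return next(
        (qa["answer"] for qa in qa_cache if qa["question"] == message),
        None,
    )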
@@ -184,7 +173,6 @@ class MyProfileAvatarChat(Config, FileReader):
             )
             messages = [{"role": "system", "content": self.system_prompt},
                         {"role": "user", "content": refine_prompt}]
-
             try:
                 response = self.openai.chat.completions.create(
                     model="gpt-4o-mini",
@@ -193,8 +181,7 @@ class MyProfileAvatarChat(Config, FileReader):
                 reply = response.choices[0].message.content
             except Exception as e:
                 print(f"Error calling OpenAI for refinement: {e}")
-                reply = similar["answer"]
-
+                reply = similar["answer"]
         else:
             # Build token-efficient context (sliding window)
             temp_history = history + [{"role": "user", "content": message}]
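This hunk carries the commit's single addition: `reply = similar["answer"]` is re-added inside the `except` block (apparently a whitespace-only change, since the text matches the removed line), keeping the fallback that stops `reply` from being unbound when refinement fails. The shape in isolation, with `call_model` as a stand-in name:

def answer_with_fallback(call_model, cached_answer: str) -> str:
    # If the refinement call fails, degrade to the cached similar answer.
    try:
        return call_model()
    except Exception as e:
        print(f"Error calling OpenAI for refinement: {e}")
        return cached_answer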
@@ -209,7 +196,6 @@ class MyProfileAvatarChat(Config, FileReader):
             reply = response.choices[0].message.content
         except Exception as e:
             print(f"Error calling OpenAI: {e}")
-
         # Evaluate the reply
         try:
             evaluation = self.evaluate(reply, message, history)
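The `else:` branch's sliding-window comment suggests history is truncated before the main completion call. A minimal sketch of such a window, assuming a fixed turn budget; `max_turns` is illustrative, not a value from the app:

def windowed_messages(system_prompt: str, history: list[dict],
                      message: str, max_turns: int = 10) -> list[dict]:
    # Keep only the most recent turns to bound token usage.
    recent = history[-max_turns:]
    return ([{"role": "system", "content": system_prompt}]
            + recent
            + [{"role": "user", "content": message}])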