Spaces:
Sleeping
Sleeping
Now using gpt-4.1-mini with temperature 1
Browse files- core.py +3 -3
- eval/main.py +1 -1
- prompts.py +2 -0
- utils.py +11 -3
core.py
CHANGED
|
@@ -91,7 +91,7 @@ def generate_synthetic_personas(num_personas: int, audience: str, previous_perso
|
|
| 91 |
prompt += build_previous_personas_context(current_context_personas) # Appends the formatted list
|
| 92 |
|
| 93 |
try:
|
| 94 |
-
response_str = call_llm(prompt=prompt, response_format=response_format,temperature=
|
| 95 |
response_data = json.loads(response_str)
|
| 96 |
users_list = response_data.get("users_personas", [])
|
| 97 |
|
|
@@ -169,7 +169,7 @@ def ask_all_questions_to_persona(persona: dict, questions: List[str]) -> str:
|
|
| 169 |
questions=questions,
|
| 170 |
num_questions=len(questions)
|
| 171 |
)
|
| 172 |
-
response_str = call_llm(prompt=prompt,temperature=
|
| 173 |
response_data = json.loads(response_str)
|
| 174 |
answers = response_data.get("answers", [])
|
| 175 |
return answers
|
|
@@ -315,7 +315,7 @@ def chat_with_persona(persona: dict, question: str, conversation_history: List[d
|
|
| 315 |
)
|
| 316 |
if conversation_history:
|
| 317 |
prompt += f"\nHere you have the previous conversation, make sure to answer the question in a way that is consistent with it:\n{history_context}"
|
| 318 |
-
return call_llm(prompt=prompt,temperature=
|
| 319 |
|
| 320 |
def chat_with_report(users: List[dict], question: str, questions: List[str]) -> str:
|
| 321 |
"""
|
|
|
|
| 91 |
prompt += build_previous_personas_context(current_context_personas) # Appends the formatted list
|
| 92 |
|
| 93 |
try:
|
| 94 |
+
response_str = call_llm(prompt=prompt, response_format=response_format,temperature=1, model="gpt-4.1-mini")
|
| 95 |
response_data = json.loads(response_str)
|
| 96 |
users_list = response_data.get("users_personas", [])
|
| 97 |
|
|
|
|
| 169 |
questions=questions,
|
| 170 |
num_questions=len(questions)
|
| 171 |
)
|
| 172 |
+
response_str = call_llm(prompt=prompt,temperature=1, model="gpt-4.1-mini",response_format=response_format)
|
| 173 |
response_data = json.loads(response_str)
|
| 174 |
answers = response_data.get("answers", [])
|
| 175 |
return answers
|
|
|
|
| 315 |
)
|
| 316 |
if conversation_history:
|
| 317 |
prompt += f"\nHere you have the previous conversation, make sure to answer the question in a way that is consistent with it:\n{history_context}"
|
| 318 |
+
return call_llm(prompt=prompt,temperature=1, model="gpt-4.1-mini")
|
| 319 |
|
| 320 |
def chat_with_report(users: List[dict], question: str, questions: List[str]) -> str:
|
| 321 |
"""
|
eval/main.py
CHANGED
|
@@ -116,7 +116,7 @@ def generate_users_batch(
|
|
| 116 |
api_url,
|
| 117 |
headers=headers,
|
| 118 |
json=request_body,
|
| 119 |
-
timeout=
|
| 120 |
)
|
| 121 |
response.raise_for_status()
|
| 122 |
duration = time.time() - start_time
|
|
|
|
| 116 |
api_url,
|
| 117 |
headers=headers,
|
| 118 |
json=request_body,
|
| 119 |
+
timeout=600
|
| 120 |
)
|
| 121 |
response.raise_for_status()
|
| 122 |
duration = time.time() - start_time
|
prompts.py
CHANGED
|
@@ -173,6 +173,7 @@ You are to fully embody and respond as the detailed user persona provided below.
|
|
| 173 |
6. **Crucial Prohibition:**
|
| 174 |
* **DO NOT** start your answer with phrases like "As [Name of Persona]...", "As this persona...", "Based on my profile...", "As an AI language model...", or any other meta-references to being a persona or AI. Speak directly *as* the persona in the first person.
|
| 175 |
|
|
|
|
| 176 |
Think carefully: Given everything you know about this individual – their identity, their job, their past, their personality, their core values, and their communication habits – how would they genuinely and thoughtfully respond to the question, sounding like the unique human they are meant to represent?
|
| 177 |
"""
|
| 178 |
|
|
@@ -250,6 +251,7 @@ Make sure your response is well-structured and professional.
|
|
| 250 |
|
| 251 |
If the question is not relevant to the interview data, please say that you cannot answer it based on the provided information.
|
| 252 |
Answer in the same language of the question.
|
|
|
|
| 253 |
"""
|
| 254 |
|
| 255 |
# Prompt for generating audience name
|
|
|
|
| 173 |
6. **Crucial Prohibition:**
|
| 174 |
* **DO NOT** start your answer with phrases like "As [Name of Persona]...", "As this persona...", "Based on my profile...", "As an AI language model...", or any other meta-references to being a persona or AI. Speak directly *as* the persona in the first person.
|
| 175 |
|
| 176 |
+
Focus on the question asked, do not be biased by the persona's profile.
|
| 177 |
Think carefully: Given everything you know about this individual – their identity, their job, their past, their personality, their core values, and their communication habits – how would they genuinely and thoughtfully respond to the question, sounding like the unique human they are meant to represent?
|
| 178 |
"""
|
| 179 |
|
|
|
|
| 251 |
|
| 252 |
If the question is not relevant to the interview data, please say that you cannot answer it based on the provided information.
|
| 253 |
Answer in the same language of the question.
|
| 254 |
+
Put your focus on the question asked, do not be biased by the interview data.
|
| 255 |
"""
|
| 256 |
|
| 257 |
# Prompt for generating audience name
|
utils.py
CHANGED
|
@@ -2,20 +2,28 @@ import openai
|
|
| 2 |
|
| 3 |
LLM_MODEL = "gpt-4.1-nano"
|
| 4 |
temperature = 0.5
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
-
def call_llm(prompt: str, response_format=None, model=LLM_MODEL, temperature=temperature) -> str:
    """Send *prompt* as a single user message and return the assistant's reply text.

    When *response_format* is supplied, the structured-output ``parse`` endpoint
    is used so the reply conforms to the given schema; otherwise a plain chat
    completion is created.
    """
    api = openai.OpenAI()
    chat = [{"role": "user", "content": prompt}]
    if not response_format:
        completion = api.chat.completions.create(
            model=model,
            messages=chat,
            temperature=temperature,
        )
    else:
        completion = api.beta.chat.completions.parse(
            model=model,
            messages=chat,
            response_format=response_format,
            temperature=temperature,
        )
    return completion.choices[0].message.content
|
|
|
|
# Module-level defaults consumed by call_llm's keyword defaults.
LLM_MODEL = "gpt-4.1-nano"  # default model; call sites override with gpt-4.1-mini
temperature = 0.5  # default sampling temperature; call sites override with 1
frequency_penalty=0  # matches the OpenAI API default (0)
presence_penalty=0  # matches the OpenAI API default (0)
# NOTE(review): the OpenAI API default for top_p is 1; top_p=0 forces
# near-greedy nucleus sampling — confirm this is intentional.
top_p=0
+
def call_llm(prompt: str, response_format=None, model=LLM_MODEL, temperature=temperature, frequency_penalty=frequency_penalty, presence_penalty=presence_penalty, top_p=top_p) -> str:
    """Call the OpenAI chat completions API with a single user message.

    Args:
        prompt: User message sent as the sole entry of the chat history.
        response_format: Optional structured-output schema; when given, the
            beta ``parse`` endpoint is used so the reply matches the schema.
        model: Model name (defaults to module-level ``LLM_MODEL``).
        temperature: Sampling temperature (module default 0.5).
        frequency_penalty: Repetition penalty (module default 0).
        presence_penalty: Presence penalty (module default 0).
        top_p: Nucleus-sampling cutoff (module default 0).
            NOTE(review): the API default is 1 — confirm top_p=0 is intended.

    Returns:
        The assistant message content as a string.
    """
    client = openai.OpenAI()
    if response_format:
        response = client.beta.chat.completions.parse(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            response_format=response_format,
            temperature=temperature,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
            top_p=top_p
        )
    else:
        # Fix: pass the caller-supplied penalties through instead of the
        # previous hardcoded 0.0, which silently ignored the
        # frequency_penalty/presence_penalty parameters on this path.
        # Defaults are unchanged (0 == 0.0), so default calls behave the same.
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty
        )
    return response.choices[0].message.content
|