Update persona_alpha_1.py

persona_alpha_1.py — CHANGED (+2 −2)
|
@@ -67,7 +67,7 @@ def generate_detailed_text(gender, age_range, life_stage, education_level, job_t

Before (lines 67–81; two removed lines are truncated in this capture):

67
68      #for _ in range(loop):
69      response = client.chat.completions.create(
70  -       model='gpt-…                    [line truncated in source capture — original model name not recoverable]
71          max_tokens=4096,
72          temperature=0.5,
73          messages=messages_base

@@ -75,7 +75,7 @@ def generate_detailed_text(gender, age_range, life_stage, education_level, job_t

75      completed_text = response.choices[0].message.content
76      total_prompt_tokens_used += response.usage.prompt_tokens
77      total_completion_tokens_used += response.usage.completion_tokens
78  -       price = total_prompt_tokens_used*0.…   [line truncated in source capture — original rate constants not recoverable]
79
80      full_text += completed_text + "\n\n----------\n\n"
81      total_price += price
|
|
|
|
After (lines 67–81):

67
68      #for _ in range(loop):
69      response = client.chat.completions.create(
70  +       model='gpt-4o',
71          max_tokens=4096,
72          temperature=0.5,
73          messages=messages_base

75      completed_text = response.choices[0].message.content
76      total_prompt_tokens_used += response.usage.prompt_tokens
77      total_completion_tokens_used += response.usage.completion_tokens
78  +       price = total_prompt_tokens_used*0.005/1000 + total_completion_tokens_used*0.015/1000
79
80      full_text += completed_text + "\n\n----------\n\n"
81      total_price += price