Spaces:
Paused
Paused
Upload folder using huggingface_hub
Browse files
- app.py +38 -9
- config.ini +1 -1
app.py
CHANGED
|
@@ -20,12 +20,41 @@ def patch_tinytroupe():
|
|
| 20 |
if os.path.exists(path):
|
| 21 |
with open(path, "r") as f:
|
| 22 |
content = f.read()
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
with open(path, "w") as f:
|
| 27 |
f.write(content)
|
| 28 |
-
print("TinyTroupe patched to
|
| 29 |
|
| 30 |
clone_tinytroupe()
|
| 31 |
|
|
@@ -96,9 +125,9 @@ def get_repo_branches(repo_full_name):
|
|
| 96 |
return ["main"]
|
| 97 |
|
| 98 |
def generate_personas(theme, customer_profile, num_personas):
|
| 99 |
-
#
|
| 100 |
-
config_manager.update("model", "alias-
|
| 101 |
-
config_manager.update("reasoning_model", "alias-
|
| 102 |
|
| 103 |
context = f"A company related to {theme}. Target customers: {customer_profile}"
|
| 104 |
|
|
@@ -164,8 +193,8 @@ def generate_tasks(theme, customer_profile):
|
|
| 164 |
Return the tasks as a JSON list of strings in the format: {{"tasks": ["task1", "task2", ...]}}
|
| 165 |
"""
|
| 166 |
|
| 167 |
-
#
|
| 168 |
-
for model_name in ["alias-large"
|
| 169 |
try:
|
| 170 |
response = client.chat.completions.create(
|
| 171 |
model=model_name,
|
|
|
|
| 20 |
if os.path.exists(path):
|
| 21 |
with open(path, "r") as f:
|
| 22 |
content = f.read()
|
| 23 |
+
|
| 24 |
+
# Ensure alias-large is used (it is the default in the client() function in the branch)
|
| 25 |
+
|
| 26 |
+
# Handle 502 errors on Helmholtz by waiting 35 seconds
|
| 27 |
+
old_502_block = """ # Temporary fallback for 502 errors on Helmholtz
|
| 28 |
+
if isinstance(e, openai.APIStatusError) and e.status_code == 502 and isinstance(self, HelmholtzBlabladorClient):
|
| 29 |
+
logger.warning("Helmholtz API returned a 502 error. Temporarily falling back to OpenAI for this request.")
|
| 30 |
+
try:
|
| 31 |
+
fallback_client = _get_client_for_api_type("openai")
|
| 32 |
+
fallback_chat_api_params = chat_api_params.copy()
|
| 33 |
+
fallback_chat_api_params["model"] = "gpt-4o-mini"
|
| 34 |
+
fallback_chat_api_params["max_tokens"] = 16384
|
| 35 |
+
|
| 36 |
+
response = fallback_client._raw_model_call(fallback_chat_api_params["model"], fallback_chat_api_params)
|
| 37 |
+
|
| 38 |
+
if enable_pydantic_model_return:
|
| 39 |
+
return utils.to_pydantic_or_sanitized_dict(fallback_client._raw_model_response_extractor(response), model=response_format)
|
| 40 |
+
else:
|
| 41 |
+
return utils.sanitize_dict(fallback_client._raw_model_response_extractor(response))
|
| 42 |
+
except Exception as fallback_e:
|
| 43 |
+
logger.error(f"Fallback to OpenAI also failed: {fallback_e}")"""
|
| 44 |
+
|
| 45 |
+
new_502_block = """ # Handle 502 errors on Helmholtz by waiting 35 seconds
|
| 46 |
+
if isinstance(e, openai.APIStatusError) and e.status_code == 502 and isinstance(self, HelmholtzBlabladorClient):
|
| 47 |
+
logger.warning("Helmholtz API returned a 502 error. Waiting 35 seconds before retrying...")
|
| 48 |
+
time.sleep(35)"""
|
| 49 |
+
|
| 50 |
+
if old_502_block in content:
|
| 51 |
+
content = content.replace(old_502_block, new_502_block)
|
| 52 |
+
else:
|
| 53 |
+
print("Could not find the 502 block to patch.")
|
| 54 |
+
|
| 55 |
with open(path, "w") as f:
|
| 56 |
f.write(content)
|
| 57 |
+
print("TinyTroupe patched to handle 502 errors with 35s wait.")
|
| 58 |
|
| 59 |
clone_tinytroupe()
|
| 60 |
|
|
|
|
| 125 |
return ["main"]
|
| 126 |
|
| 127 |
def generate_personas(theme, customer_profile, num_personas):
|
| 128 |
+
# Ensure alias-large is used
|
| 129 |
+
config_manager.update("model", "alias-large")
|
| 130 |
+
config_manager.update("reasoning_model", "alias-large")
|
| 131 |
|
| 132 |
context = f"A company related to {theme}. Target customers: {customer_profile}"
|
| 133 |
|
|
|
|
| 193 |
Return the tasks as a JSON list of strings in the format: {{"tasks": ["task1", "task2", ...]}}
|
| 194 |
"""
|
| 195 |
|
| 196 |
+
# Use alias-large
|
| 197 |
+
for model_name in ["alias-large"]:
|
| 198 |
try:
|
| 199 |
response = client.chat.completions.create(
|
| 200 |
model=model_name,
|
config.ini
CHANGED
|
@@ -18,7 +18,7 @@ BLABLADOR_ENDPOINT=https://api.helmholtz-blablador.fz-juelich.de/v1
|
|
| 18 |
#
|
| 19 |
|
| 20 |
# The main text generation model, used for agent responses
|
| 21 |
-
MODEL=alias-
|
| 22 |
|
| 23 |
# Reasoning model is used when precise reasoning is required, such as when computing detailed analyses of simulation properties.
|
| 24 |
REASONING_MODEL=alias-large
|
|
|
|
| 18 |
#
|
| 19 |
|
| 20 |
# The main text generation model, used for agent responses
|
| 21 |
+
MODEL=alias-large
|
| 22 |
|
| 23 |
# Reasoning model is used when precise reasoning is required, such as when computing detailed analyses of simulation properties.
|
| 24 |
REASONING_MODEL=alias-large
|