Spaces:
Paused
Paused
Upload folder using huggingface_hub
Browse files
- Dockerfile +1 -0
- app.py +10 -9
Dockerfile
CHANGED
|
@@ -17,6 +17,7 @@ COPY . .
|
|
| 17 |
ENV PYTHONUNBUFFERED=1
|
| 18 |
ENV GRADIO_SERVER_NAME="0.0.0.0"
|
| 19 |
ENV GRADIO_SERVER_PORT=7860
|
|
|
|
| 20 |
|
| 21 |
EXPOSE 7860
|
| 22 |
|
|
|
|
| 17 |
ENV PYTHONUNBUFFERED=1
|
| 18 |
ENV GRADIO_SERVER_NAME="0.0.0.0"
|
| 19 |
ENV GRADIO_SERVER_PORT=7860
|
| 20 |
+
ENV GRADIO_SSR=0
|
| 21 |
|
| 22 |
EXPOSE 7860
|
| 23 |
|
app.py
CHANGED
|
@@ -29,7 +29,6 @@ def save_persona_base(personas):
|
|
| 29 |
if not HF_TOKEN:
|
| 30 |
print("HF_TOKEN not found, skipping upload.")
|
| 31 |
return
|
| 32 |
-
# Use a temporary file to avoid conflicts
|
| 33 |
temp_file = "persona_base_upload.json"
|
| 34 |
with open(temp_file, 'w', encoding='utf-8') as f:
|
| 35 |
json.dump(personas, f, indent=4)
|
|
@@ -61,8 +60,6 @@ def render_personas_to_markdown(personas):
|
|
| 61 |
md += f"**Age**: {age} | **Gender**: {gender} | **Nationality**: {nationality}\n\n"
|
| 62 |
md += f"**Occupation**: {occupation}\n\n"
|
| 63 |
md += f"**Description**: {description}\n\n"
|
| 64 |
-
|
| 65 |
-
# Add a small expandable section for the full JSON
|
| 66 |
md += f"<details><summary>View Full JSON</summary>\n\n```json\n{json.dumps(p, indent=2)}\n```\n\n</details>\n\n"
|
| 67 |
md += "---\n"
|
| 68 |
return md
|
|
@@ -88,7 +85,7 @@ def generate_personas(business_description, customer_profile, num_personas, blab
|
|
| 88 |
)
|
| 89 |
|
| 90 |
for i in range(num_personas):
|
| 91 |
-
current_md = f"Generating persona {i+1}/{num_personas}... ⏳\n\n" + render_personas_to_markdown(all_personas_data)
|
| 92 |
yield all_personas_data, current_md, gr.update(visible=True)
|
| 93 |
|
| 94 |
person = factory.generate_person()
|
|
@@ -97,7 +94,7 @@ def generate_personas(business_description, customer_profile, num_personas, blab
|
|
| 97 |
all_personas_data.append(persona_data)
|
| 98 |
yield all_personas_data, render_personas_to_markdown(all_personas_data), gr.update(visible=True)
|
| 99 |
|
| 100 |
-
yield all_personas_data, render_personas_to_markdown(all_personas_data), gr.update(visible=False)
|
| 101 |
|
| 102 |
except GeneratorExit:
|
| 103 |
print("Generation cancelled by user.")
|
|
@@ -106,9 +103,12 @@ def generate_personas(business_description, customer_profile, num_personas, blab
|
|
| 106 |
finally:
|
| 107 |
if all_personas_data:
|
| 108 |
print(f"Saving {len(all_personas_data)} personas...")
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
|
|
|
|
|
|
|
|
|
| 112 |
|
| 113 |
if original_key is None:
|
| 114 |
if "BLABLADOR_API_KEY" in os.environ:
|
|
@@ -135,7 +135,7 @@ def find_best_persona(criteria):
|
|
| 135 |
|
| 136 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
| 137 |
gr.Markdown("# 🏭 Tiny Persona Factory")
|
| 138 |
-
gr.Markdown("Generate realistic personas for your business simulation.")
|
| 139 |
|
| 140 |
with gr.Row():
|
| 141 |
with gr.Column(scale=1):
|
|
@@ -183,6 +183,7 @@ def health():
|
|
| 183 |
def api_docs():
|
| 184 |
return RedirectResponse(url="/docs")
|
| 185 |
|
|
|
|
| 186 |
try:
|
| 187 |
app = gr.mount_gradio_app(app, demo, path="/", ssr=False)
|
| 188 |
except Exception:
|
|
|
|
| 29 |
if not HF_TOKEN:
|
| 30 |
print("HF_TOKEN not found, skipping upload.")
|
| 31 |
return
|
|
|
|
| 32 |
temp_file = "persona_base_upload.json"
|
| 33 |
with open(temp_file, 'w', encoding='utf-8') as f:
|
| 34 |
json.dump(personas, f, indent=4)
|
|
|
|
| 60 |
md += f"**Age**: {age} | **Gender**: {gender} | **Nationality**: {nationality}\n\n"
|
| 61 |
md += f"**Occupation**: {occupation}\n\n"
|
| 62 |
md += f"**Description**: {description}\n\n"
|
|
|
|
|
|
|
| 63 |
md += f"<details><summary>View Full JSON</summary>\n\n```json\n{json.dumps(p, indent=2)}\n```\n\n</details>\n\n"
|
| 64 |
md += "---\n"
|
| 65 |
return md
|
|
|
|
| 85 |
)
|
| 86 |
|
| 87 |
for i in range(num_personas):
|
| 88 |
+
current_md = f"#### 🔄 Generating persona {i+1}/{num_personas}... ⏳\n\n" + render_personas_to_markdown(all_personas_data)
|
| 89 |
yield all_personas_data, current_md, gr.update(visible=True)
|
| 90 |
|
| 91 |
person = factory.generate_person()
|
|
|
|
| 94 |
all_personas_data.append(persona_data)
|
| 95 |
yield all_personas_data, render_personas_to_markdown(all_personas_data), gr.update(visible=True)
|
| 96 |
|
| 97 |
+
yield all_personas_data, "### ✅ Generation complete! All personas saved to Tresor.\n\n" + render_personas_to_markdown(all_personas_data), gr.update(visible=False)
|
| 98 |
|
| 99 |
except GeneratorExit:
|
| 100 |
print("Generation cancelled by user.")
|
|
|
|
| 103 |
finally:
|
| 104 |
if all_personas_data:
|
| 105 |
print(f"Saving {len(all_personas_data)} personas...")
|
| 106 |
+
try:
|
| 107 |
+
current_base = load_persona_base()
|
| 108 |
+
current_base.extend(all_personas_data)
|
| 109 |
+
save_persona_base(current_base)
|
| 110 |
+
except Exception as se:
|
| 111 |
+
print(f"Error during final save: {se}")
|
| 112 |
|
| 113 |
if original_key is None:
|
| 114 |
if "BLABLADOR_API_KEY" in os.environ:
|
|
|
|
| 135 |
|
| 136 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
| 137 |
gr.Markdown("# 🏭 Tiny Persona Factory")
|
| 138 |
+
gr.Markdown("Generate realistic personas for your business simulation. Results are automatically saved to the Tresor.")
|
| 139 |
|
| 140 |
with gr.Row():
|
| 141 |
with gr.Column(scale=1):
|
|
|
|
| 183 |
def api_docs():
|
| 184 |
return RedirectResponse(url="/docs")
|
| 185 |
|
| 186 |
+
# Try mounting with ssr=False to avoid port conflicts
|
| 187 |
try:
|
| 188 |
app = gr.mount_gradio_app(app, demo, path="/", ssr=False)
|
| 189 |
except Exception:
|