ruslanmv committed on
Commit
91e5a15
·
1 Parent(s): c13125f

First commit

Browse files
Files changed (2) hide show
  1. app.py +12 -3
  2. requirements.txt +4 -4
app.py CHANGED
@@ -11,6 +11,9 @@ import textwrap
11
  import uuid
12
  from typing import List, Dict, Tuple, Generator
13
 
 
 
 
14
  # --- Load .env early (for HF_TOKEN / SECRET_TOKEN) ---
15
  from dotenv import load_dotenv
16
  load_dotenv()
@@ -188,7 +191,11 @@ def generate_text_stream(llm_instance: Llama, prompt: str,
188
  for response in stream:
189
  ch = response["choices"][0]["text"]
190
  # Guard against control tokens & isolated emoji artefacts
191
- if "<|user|>" in ch or (len(ch) == 1 and emoji.is_emoji(ch)):
 
 
 
 
192
  continue
193
  yield ch
194
 
@@ -212,7 +219,7 @@ def generate_audio_stream(tts_instance: Xtts, text: str, language: str,
212
  gr.Warning("Critical GPU error. Attempting to restart the Space...")
213
  try:
214
  api.restart_space(repo_id=repo_id)
215
- except Exception as _:
216
  pass
217
 
218
  # ===================================================================================
@@ -312,7 +319,9 @@ demo = gr.Interface(
312
  title="AI Storyteller with ZeroGPU",
313
  description="Enter a prompt to generate a short story with voice narration using on-demand GPU.",
314
  allow_flagging="never",
 
315
  )
316
 
317
  if __name__ == "__main__":
318
- demo.queue().launch()
 
 
11
  import uuid
12
  from typing import List, Dict, Tuple, Generator
13
 
14
+ # Make sure Gradio analytics is off (so we don't need pandas 2.x)
15
+ os.environ.setdefault("GRADIO_ANALYTICS_ENABLED", "False")
16
+
17
  # --- Load .env early (for HF_TOKEN / SECRET_TOKEN) ---
18
  from dotenv import load_dotenv
19
  load_dotenv()
 
191
  for response in stream:
192
  ch = response["choices"][0]["text"]
193
  # Guard against control tokens & isolated emoji artefacts
194
+ try:
195
+ is_single_emoji = (len(ch) == 1 and emoji.is_emoji(ch)) # emoji>=2.x
196
+ except Exception:
197
+ is_single_emoji = False
198
+ if "<|user|>" in ch or is_single_emoji:
199
  continue
200
  yield ch
201
 
 
219
  gr.Warning("Critical GPU error. Attempting to restart the Space...")
220
  try:
221
  api.restart_space(repo_id=repo_id)
222
+ except Exception:
223
  pass
224
 
225
  # ===================================================================================
 
319
  title="AI Storyteller with ZeroGPU",
320
  description="Enter a prompt to generate a short story with voice narration using on-demand GPU.",
321
  allow_flagging="never",
322
+ analytics_enabled=False, # <- keep analytics off to avoid pandas 2.x requirement
323
  )
324
 
325
  if __name__ == "__main__":
326
+ # For Spaces or Docker, these defaults are handy; adjust as needed.
327
+ demo.queue().launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))
requirements.txt CHANGED
@@ -6,10 +6,10 @@ huggingface-hub>=0.19
6
  python-dotenv
7
  spaces
8
  requests
9
- numpy
10
- pandas<2.0,>=1.4
11
 
12
- # TTS
13
  TTS @ git+https://github.com/coqui-ai/TTS@v0.22.0
14
  pydantic==2.5.3
15
 
@@ -26,4 +26,4 @@ ffmpeg-python
26
 
27
  # Japanese Text (optional)
28
  mecab-python3==1.0.9
29
- unidic-lite==1.0.8
 
6
  python-dotenv
7
  spaces
8
  requests
9
+ numpy==1.26.4
10
+ pandas==1.5.3
11
 
12
+ # TTS (legacy)
13
  TTS @ git+https://github.com/coqui-ai/TTS@v0.22.0
14
  pydantic==2.5.3
15
 
 
26
 
27
  # Japanese Text (optional)
28
  mecab-python3==1.0.9
29
+ unidic-lite==1.0.8