owaski committed on
Commit
d07cbce
·
1 Parent(s): 215a12d

add example usage

Browse files
Files changed (1) hide show
  1. app.py +33 -9
app.py CHANGED
@@ -478,21 +478,40 @@ default_instruct=(
478
  "less than 15 words."
479
  )
480
  import pandas as pd
481
- examples = pd.DataFrame([
482
- ["General Purpose Conversation", default_instruct],
483
- ["Translation", "You are a translator. Translate user text into English."],
484
- ["General Purpose Conversation with Disfluencies", "Please reply to user with lot of filler words like ummm, so"],
485
- ["Summarization", "You are summarizer. Summarize user's utterance."]
486
- ], columns=["Task", "LLM Prompt"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
487
  with gr.Blocks(
488
  title="ESPnet-SDS Offline Audio Processing",
489
  ) as demo:
490
  with gr.Row():
491
  gr.Markdown(
492
  """
493
- ## ESPnet-SDS (Offline Mode)
494
- Welcome to our offline audio processing interface for various cascaded and
495
- E2E spoken dialogue systems built using ESPnet-SDS toolkit.
496
 
497
  **How to use:**
498
  1. Upload or record an audio file
@@ -554,6 +573,11 @@ with gr.Blocks(
554
  value="mini-omni",
555
  visible=False,
556
  )
 
 
 
 
 
557
  with gr.Column(scale=1):
558
  output_asr_text = gr.Textbox(label="ASR Transcription", interactive=False)
559
  output_text = gr.Textbox(label="LLM Response", interactive=False)
 
478
  "less than 15 words."
479
  )
480
  import pandas as pd
481
+
482
+ # Usage examples: (label, prompt, TTS_display_name)
483
+ # Use TTS_reverse_map[TTS_name] for default TTS; or a specific TTS_display_names entry
484
+ example_translate_ja_prompt = "You are a translator. Translate what user says into Japanese."
485
+ example_summarize_prompt = "Your task is to summarize what user says in one short sentence no more than 10 words."
486
+ example_story_prompt = (
487
+ "You are an Imaginative Story Writer.\n\n"
488
+ "Given user input, craft compelling fiction with:\n"
489
+ "- strong scene setting\n"
490
+ "- character voice\n"
491
+ "- emotional arc\n"
492
+ "- satisfying narrative progression\n\n"
493
+ "Guidelines:\n"
494
+ "- Respect user-provided anchors (characters, world, plot constraints).\n"
495
+ "- Expand creatively between anchors with sensory detail and subtext.\n"
496
+ "- Use \"show, don't tell\" where possible.\n"
497
+ "- Maintain internal logic and continuity.\n"
498
+ "- End scenes with momentum unless user requests a full ending.\n"
499
+ "- Output only story content."
500
+ )
501
+
502
+ examples = [
503
+ ["Translation to Japanese", example_translate_ja_prompt, TTS_display_names[1]],
504
+ ["Summarization (≤10 words)", example_summarize_prompt, TTS_reverse_map[TTS_name]],
505
+ ["Imaginative Story Writer", example_story_prompt, TTS_reverse_map[TTS_name]],
506
+ ]
507
  with gr.Blocks(
508
  title="ESPnet-SDS Offline Audio Processing",
509
  ) as demo:
510
  with gr.Row():
511
  gr.Markdown(
512
  """
513
+ ## ESPnet Prompt Editing Demo
514
+ Welcome to our prompt editing demo using ESPnet-SDS toolkit.
 
515
 
516
  **How to use:**
517
  1. Upload or record an audio file
 
573
  value="mini-omni",
574
  visible=False,
575
  )
576
+ gr.Examples(
577
+ examples=[[row[1], row[2]] for row in examples],
578
+ inputs=[input_text, radio],
579
+ label="Usage examples",
580
+ )
581
  with gr.Column(scale=1):
582
  output_asr_text = gr.Textbox(label="ASR Transcription", interactive=False)
583
  output_text = gr.Textbox(label="LLM Response", interactive=False)