Brunwo committed on
Commit
4dba2c2
·
1 Parent(s): 276796f

WIP 2, not compiling (checkbox conditional rendering)

Browse files
Files changed (4) hide show
  1. README.md +11 -0
  2. app.py +62 -38
  3. script.js +13 -2
  4. testGradioAPI.py +11 -10
README.md CHANGED
@@ -14,6 +14,17 @@ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-
14
 
15
 
16
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
 
19
  openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes
 
14
 
15
 
16
 
17
+ this adds
18
+ - openAI API key loaded via .env file
19
+ - other theme
20
+ - relocated i18n translation (via gettext)
21
+ - changed functions to get cleaner API for generation
22
+
23
+ todo :
24
+ - number of words in params
25
+
26
+
27
+
28
 
29
 
30
  openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes
app.py CHANGED
@@ -256,21 +256,29 @@ def generate_audio(
256
  speaker_1_voice: str = "alloy",
257
  speaker_2_voice: str = "echo",
258
  api_base: str = None,
259
- intro_instructions: str = INSTRUCTION_TEMPLATES["podcast"]["intro"],
260
- text_instructions: str = INSTRUCTION_TEMPLATES["podcast"]["text_instructions"],
261
- scratch_pad_instructions: str = INSTRUCTION_TEMPLATES["podcast"]["scratch_pad"],
262
- prelude_dialog: str = INSTRUCTION_TEMPLATES["podcast"]["prelude"],
263
- podcast_dialog_instructions: str = INSTRUCTION_TEMPLATES["podcast"]["dialog"],
264
  edited_transcript: str = None,
265
  user_feedback: str = None,
266
  original_text: str = None,
267
  debug = False,
 
 
268
  ) -> tuple:
269
-
270
 
271
  if not url:
272
  return None, None, None, "Please provide a valid URL before generating audio."
273
 
 
 
 
 
 
 
 
274
  try:
275
 
276
  # Validate API Key
@@ -499,37 +507,47 @@ with gr.Blocks(theme='lone17/kotaemon', title="Text to Audio") as demo:
499
  value="podcast",
500
  info="Select the instruction template to use. You can also edit any of the fields for more tailored results.",
501
  )
502
- intro_instructions = gr.Textbox(
503
- label="Intro Instructions",
504
- lines=10,
505
- value=INSTRUCTION_TEMPLATES["podcast"]["intro"],
506
- info="Provide the introductory instructions for generating the dialogue.",
507
- )
508
- text_instructions = gr.Textbox(
509
- label="Standard Text Analysis Instructions",
510
- lines=10,
511
- placeholder="Enter text analysis instructions...",
512
- value=INSTRUCTION_TEMPLATES["podcast"]["text_instructions"],
513
- info="Provide the instructions for analyzing the raw data and text.",
514
- )
515
- scratch_pad_instructions = gr.Textbox(
516
- label="Scratch Pad Instructions",
517
- lines=15,
518
- value=INSTRUCTION_TEMPLATES["podcast"]["scratch_pad"],
519
- info="Provide the scratch pad instructions for brainstorming presentation/dialogue content.",
520
- )
521
- prelude_dialog = gr.Textbox(
522
- label="Prelude Dialog",
523
- lines=5,
524
- value=INSTRUCTION_TEMPLATES["podcast"]["prelude"],
525
- info="Provide the prelude instructions before the presentation/dialogue is developed.",
526
- )
527
- podcast_dialog_instructions = gr.Textbox(
528
- label="Podcast Dialog Instructions",
529
- lines=20,
530
- value=INSTRUCTION_TEMPLATES["podcast"]["dialog"],
531
- info="Provide the instructions for generating the presentation or podcast dialogue.",
532
- )
 
 
 
 
 
 
 
 
 
 
533
 
534
  audio_output = gr.Audio(label="Audio", format="mp3", interactive=False, autoplay=False)
535
  transcript_output = gr.Textbox(label="Transcript", lines=20, show_copy_button=True)
@@ -569,12 +587,18 @@ with gr.Blocks(theme='lone17/kotaemon', title="Text to Audio") as demo:
569
  submit_btn.click(
570
  fn=generate_audio,
571
  inputs=[
 
 
 
 
 
 
572
  url_input, openai_api_key, text_model, audio_model,
573
  speaker_1_voice, speaker_2_voice, api_base,
574
  intro_instructions, text_instructions, scratch_pad_instructions,
575
  prelude_dialog, podcast_dialog_instructions,
576
  edited_transcript,
577
- user_feedback,
578
  ],
579
  outputs=[audio_output, transcript_output, original_text_output, error_output]
580
  ).then(
 
256
  speaker_1_voice: str = "alloy",
257
  speaker_2_voice: str = "echo",
258
  api_base: str = None,
259
+ intro_instructions: str = None,
260
+ text_instructions: str = None ,
261
+ scratch_pad_instructions: str = None ,
262
+ prelude_dialog: str = None,
263
+ podcast_dialog_instructions: str = None,
264
  edited_transcript: str = None,
265
  user_feedback: str = None,
266
  original_text: str = None,
267
  debug = False,
268
+ # template_dropdown : str = "", = original text ?
269
+ use_default_template : bool = False,
270
  ) -> tuple:
 
271
 
272
  if not url:
273
  return None, None, None, "Please provide a valid URL before generating audio."
274
 
275
+ if use_default_template:
276
+ intro_instructions = INSTRUCTION_TEMPLATES[original_text]["intro"]
277
+ text_instructions = INSTRUCTION_TEMPLATES[original_text]["text_instructions"]
278
+ scratch_pad_instructions = INSTRUCTION_TEMPLATES[original_text]["scratch_pad"]
279
+ prelude_dialog = INSTRUCTION_TEMPLATES[original_text]["prelude"]
280
+ podcast_dialog_instructions = INSTRUCTION_TEMPLATES[original_text]["dialog"]
281
+
282
  try:
283
 
284
  # Validate API Key
 
507
  value="podcast",
508
  info="Select the instruction template to use. You can also edit any of the fields for more tailored results.",
509
  )
510
+ default_template_checkbox = gr.Checkbox(label="skip all template customization")
511
+
512
+
513
+
514
+ @gr.render(inputs=default_template_checkbox)
515
+ def show_customization(checkbox):
516
+ if not checkbox:
517
+ gr.Markdown("## No customization")
518
+ else:
519
+
520
+ intro_instructions = gr.Textbox(
521
+ label="Intro Instructions",
522
+ lines=10,
523
+ value=INSTRUCTION_TEMPLATES["podcast"]["intro"],
524
+ info="Provide the introductory instructions for generating the dialogue.",
525
+ )
526
+ text_instructions = gr.Textbox(
527
+ label="Standard Text Analysis Instructions",
528
+ lines=10,
529
+ placeholder="Enter text analysis instructions...",
530
+ value=INSTRUCTION_TEMPLATES["podcast"]["text_instructions"],
531
+ info="Provide the instructions for analyzing the raw data and text.",
532
+ )
533
+ scratch_pad_instructions = gr.Textbox(
534
+ label="Scratch Pad Instructions",
535
+ lines=15,
536
+ value=INSTRUCTION_TEMPLATES["podcast"]["scratch_pad"],
537
+ info="Provide the scratch pad instructions for brainstorming presentation/dialogue content.",
538
+ )
539
+ prelude_dialog = gr.Textbox(
540
+ label="Prelude Dialog",
541
+ lines=5,
542
+ value=INSTRUCTION_TEMPLATES["podcast"]["prelude"],
543
+ info="Provide the prelude instructions before the presentation/dialogue is developed.",
544
+ )
545
+ podcast_dialog_instructions = gr.Textbox(
546
+ label="Podcast Dialog Instructions",
547
+ lines=20,
548
+ value=INSTRUCTION_TEMPLATES["podcast"]["dialog"],
549
+ info="Provide the instructions for generating the presentation or podcast dialogue.",
550
+ )
551
 
552
  audio_output = gr.Audio(label="Audio", format="mp3", interactive=False, autoplay=False)
553
  transcript_output = gr.Textbox(label="Transcript", lines=20, show_copy_button=True)
 
587
  submit_btn.click(
588
  fn=generate_audio,
589
  inputs=[
590
+ url_input, openai_api_key, text_model, audio_model,
591
+ speaker_1_voice, speaker_2_voice, api_base,
592
+ None,None,None,None,None,
593
+ edited_transcript,
594
+ user_feedback,template_dropdown,default_template_checkbox
595
+ if default_template_checkbox else
596
  url_input, openai_api_key, text_model, audio_model,
597
  speaker_1_voice, speaker_2_voice, api_base,
598
  intro_instructions, text_instructions, scratch_pad_instructions,
599
  prelude_dialog, podcast_dialog_instructions,
600
  edited_transcript,
601
+ user_feedback,template_dropdown,default_template_checkbox
602
  ],
603
  outputs=[audio_output, transcript_output, original_text_output, error_output]
604
  ).then(
script.js CHANGED
@@ -13,7 +13,7 @@ document.addEventListener("DOMContentLoaded", function() {
13
  url: link,
14
  // Add other necessary parameters here
15
  openai_api_key: 'your_api_key_here', // Be cautious with API keys in client-side code
16
- text_model: 'o1-preview-2024-09-12',
17
  audio_model: 'tts-1',
18
  speaker_1_voice: 'alloy',
19
  speaker_2_voice: 'echo',
@@ -75,4 +75,15 @@ document.addEventListener("DOMContentLoaded", function() {
75
  });
76
  }
77
 
78
-
 
 
 
 
 
 
 
 
 
 
 
 
13
  url: link,
14
  // Add other necessary parameters here
15
  openai_api_key: 'your_api_key_here', // Be cautious with API keys in client-side code
16
+ text_model: 'gpt-4o-mini',
17
  audio_model: 'tts-1',
18
  speaker_1_voice: 'alloy',
19
  speaker_2_voice: 'echo',
 
75
  });
76
  }
77
 
78
+ // JavaScript code to read URL parameters
79
+ const urlParams = new URLSearchParams(window.location.search);
80
+
81
+ // Retrieve specific parameters
82
+ const name = urlParams.get('name'); // "John"
83
+ const age = urlParams.get('age'); // "30"
84
+
85
+ // Display the parameters in the output div
86
+ document.getElementById('shared-content').innerHTML = `
87
+ <p>Name: ${name}</p>
88
+ <p>Age: ${age}</p>
89
+ `;
testGradioAPI.py CHANGED
@@ -2,15 +2,16 @@ from gradio_client import Client
2
 
3
  client = Client("http://127.0.0.1:7860/")
4
  result = client.predict(
5
- param_0="Hello!!",
6
- param_1="Hello!!",
7
- param_2="o1-preview-2024-09-12",
8
- param_3="tts-1",
9
- param_4="alloy",
10
- param_5="echo",
11
- param_6="Hello!!",
12
- param_12="Hello!!",
13
- param_13="Hello!!",
14
- api_name="/validate_and_generate_audio"
 
15
  )
16
  print(result)
 
2
 
3
  client = Client("http://127.0.0.1:7860/")
4
  result = client.predict(
5
+ url="https://www.infoq.com/articles/ai-ml-data-engineering-trends-2024/",
6
+ use_default_template="summary",
7
+ openai_api_key=None,
8
+ text_model="gpt-4o-mini",
9
+ audio_model="tts-1",
10
+ speaker_1_voice="alloy",
11
+ speaker_2_voice="echo",
12
+ api_base=None,
13
+ edited_transcript=None,
14
+ user_feedback=None,
15
+ api_name="/generate_audio"
16
  )
17
  print(result)