Ramkumar committed on
Commit
81ad461
·
verified ·
1 Parent(s): 04486d7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -11
app.py CHANGED
@@ -5,14 +5,9 @@ from PIL import Image
5
  import requests
6
  import io
7
  import time
8
- import whisper
9
- import gradio as gr
10
  from groq import Groq
11
- from deep_translator import GoogleTranslator
12
  import torch
13
 
14
-
15
- # Replace with your actual Hugging Face API details
16
  os.environ['hugging']
17
  H_key = os.getenv('hugging')
18
  API_URL = "https://api-inference.huggingface.co/models/Artples/LAI-ImageGeneration-vSDXL-2"
@@ -94,17 +89,56 @@ def process_audio_or_text(input_text, audio_path, generate_image_flag):
94
 
95
  if generate_image_flag: # Generate image if the checkbox is checked
96
  image = generate_image(translation)
97
-
98
  return tamil_text, chatbot_response, image # Return both chatbot response and image (if generated)
99
 
100
- with gr.Blocks() as iface:
101
- gr.Markdown("# AI Chatbot and Image Generation App")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
  with gr.Row():
104
  with gr.Column(scale=1): # Left side (Inputs and Buttons)
105
  user_input = gr.Textbox(label="Enter Tamil text", placeholder="Type your message here...")
106
- audio_input = gr.Audio(type="filepath", label=" Or upload audio (for Image Generation)")
107
- image_generation_checkbox = gr.Checkbox(label="Generate Image", value=False)
108
 
109
  # Buttons
110
  submit_btn = gr.Button("Submit")
@@ -119,7 +153,7 @@ with gr.Blocks() as iface:
119
  submit_btn.click(fn=process_audio_or_text,
120
  inputs=[user_input, audio_input, image_generation_checkbox],
121
  outputs=[text_output_1, text_output_2, image_output])
122
-
123
  clear_btn.click(lambda: ("", None, False, "", "", None),
124
  inputs=[],
125
  outputs=[user_input, audio_input, image_generation_checkbox, text_output_1, text_output_2, image_output])
 
5
  import requests
6
  import io
7
  import time
 
 
8
  from groq import Groq
 
9
  import torch
10
 
 
 
11
  os.environ['hugging']
12
  H_key = os.getenv('hugging')
13
  API_URL = "https://api-inference.huggingface.co/models/Artples/LAI-ImageGeneration-vSDXL-2"
 
89
 
90
  if generate_image_flag: # Generate image if the checkbox is checked
91
  image = generate_image(translation)
92
+
93
  return tamil_text, chatbot_response, image # Return both chatbot response and image (if generated)
94
 
95
+ # Custom CSS for improved styling and centered title
96
+ css = """
97
+ .gradio-container {
98
+ font-family: 'Georgia', serif;
99
+ background-color: #f5f5f5;
100
+ padding: 20px;
101
+ color: #000000;
102
+ }
103
+ .gr-row {
104
+ box-shadow: 0px 4px 12px rgba(0, 0, 0, 0.1);
105
+ background-color: #ffffff;
106
+ border-radius: 10px;
107
+ padding: 20px;
108
+ margin: 10px 0;
109
+ }
110
+ .gr-button {
111
+ background-color: #8b4513;
112
+ color: white;
113
+ font-size: 16px;
114
+ border-radius: 5px;
115
+ }
116
+ .gr-button:hover {
117
+ background-color: #6a3511;
118
+ }
119
+ .gr-checkbox-label {
120
+ font-weight: bold;
121
+ }
122
+ .gr-image {
123
+ border-radius: 10px;
124
+ box-shadow: 0px 4px 12px rgba(0, 0, 0, 0.1);
125
+ }
126
+ #main-title {
127
+ text-align: center;
128
+ font-size: 28px;
129
+ font-weight: bold;
130
+ color: #8b4513;
131
+ }
132
+ """
133
+
134
+ with gr.Blocks(css=css) as iface:
135
+ gr.Markdown("<h1 id='main-title'>🖼️ AI Chatbot and Image Generation App</h1>")
136
 
137
  with gr.Row():
138
  with gr.Column(scale=1): # Left side (Inputs and Buttons)
139
  user_input = gr.Textbox(label="Enter Tamil text", placeholder="Type your message here...")
140
+ audio_input = gr.Audio(type="filepath", label="Or upload audio (for Image Generation)")
141
+ image_generation_checkbox = gr.Checkbox(label="Generate Image", value=True)
142
 
143
  # Buttons
144
  submit_btn = gr.Button("Submit")
 
153
  submit_btn.click(fn=process_audio_or_text,
154
  inputs=[user_input, audio_input, image_generation_checkbox],
155
  outputs=[text_output_1, text_output_2, image_output])
156
+
157
  clear_btn.click(lambda: ("", None, False, "", "", None),
158
  inputs=[],
159
  outputs=[user_input, audio_input, image_generation_checkbox, text_output_1, text_output_2, image_output])