# Sketchtools / app.py
# Uploaded by Doubleupai ("Create app.py", commit b8d2b0c, verified)
import gradio as gr
def image_generation(category, model, prompt, unwanted, format, style):
    """Placeholder handler for the Image Generation tab.

    Receives the UI selections (category, model, prompt, negative
    prompt, aspect-ratio format, style) and returns a status string.
    """
    # TODO: call the real image-generation backend here.
    return "Image generated in PNG format."
def video_generation(model, image, prompt):
    """Placeholder handler for the Video Generation tab.

    Takes the chosen model, a source image and a text prompt;
    returns a status string.
    """
    # TODO: call the real video-generation backend here.
    return "Video generated."
def audio_generation(model, song_title, lyrics, genre):
    """Placeholder handler for the Audio Generation tab.

    Takes the chosen model plus song metadata (title, lyrics, genre);
    returns a status string.
    """
    # TODO: call the real music-generation backend here.
    return "Music generated."
def text_generation(model, question):
    """Placeholder handler for the Text Generation tab.

    Takes the chosen model and a question; returns a status string.
    """
    # TODO: call the real LLM backend here.
    return "Generated response."
def text_to_speech(model, language, voice, text):
    """Placeholder handler for the Text to Speech tab.

    Takes the chosen model, language, voice and input text;
    returns a status string.
    """
    # TODO: call the real TTS backend here.
    return "Audio generated."
def deepfake(model, image, face):
    """Placeholder handler for the Deepfake tab.

    Takes the chosen model, a source image/video and a replacement
    face image; returns a status string.
    """
    # TODO: call the real face-swap backend here.
    return "Deepfake generated."
# Build the Dropskale UI: one tab per generation task. Each tab wires
# its input components and a button to the matching handler above.
# NOTE: the original used gr.Output(), which does not exist in Gradio
# and raises AttributeError at startup; gr.Textbox is used instead,
# matching the string values every handler returns.
with gr.Blocks() as app:
    gr.Markdown("# Dropskale")

    with gr.Tab("Image Generation"):
        category = gr.Dropdown(["image generation"], label="Category")
        model = gr.Dropdown(
            ["midjourney", "ideogram", "leonardo ai", "flux 1.1 pro ultra", "flux 1.1 dev"],
            label="Model",
        )
        prompt = gr.Textbox(label="Prompt")
        unwanted = gr.Textbox(label="What you don't want to see")
        # Named aspect_ratio locally to avoid shadowing the builtin `format`;
        # the handler's parameter name is unchanged.
        aspect_ratio = gr.Dropdown(["16:9", "9:16", "4:3", "1:1"], label="Format")
        style = gr.Textbox(label="Style")
        generate_btn = gr.Button("Generate Image")
        output = gr.Textbox(label="Output")
        generate_btn.click(
            image_generation,
            inputs=[category, model, prompt, unwanted, aspect_ratio, style],
            outputs=output,
        )

    with gr.Tab("Video Generation"):
        model = gr.Dropdown(
            ["luma dream machine 1.6", "runway gen-3 turbo", "kling ai 1.5", "hailuo ai"],
            label="Model",
        )
        image = gr.Image(label="Select Image")
        prompt = gr.Textbox(label="Prompt")
        generate_btn = gr.Button("Generate Video")
        output = gr.Textbox(label="Output")
        generate_btn.click(video_generation, inputs=[model, image, prompt], outputs=output)

    with gr.Tab("Audio Generation"):
        model = gr.Dropdown(["udio 1.5", "suno 4.0"], label="Model")
        song_title = gr.Textbox(label="Song Title")
        lyrics = gr.Textbox(label="Lyrics")
        genre = gr.Textbox(label="Genre")
        generate_btn = gr.Button("Generate Music")
        output = gr.Textbox(label="Output")
        generate_btn.click(
            audio_generation,
            inputs=[model, song_title, lyrics, genre],
            outputs=output,
        )

    with gr.Tab("Text Generation"):
        model = gr.Dropdown(
            ["chatgpt models", "claude 3.5 sonnet", "gemini pro 1.5", "mistral", "writesonic"],
            label="Model",
        )
        question = gr.Textbox(label="Question")
        generate_btn = gr.Button("Generate Response")
        output = gr.Textbox(label="Output")
        generate_btn.click(text_generation, inputs=[model, question], outputs=output)

    with gr.Tab("Text to Speech"):
        model = gr.Dropdown(["elevenlabs turbo 2.5", "dupdub", "open ai tts"], label="Model")
        language = gr.Dropdown(["English", "Spanish", "French"], label="Language")
        voice = gr.Dropdown(["Voice 1", "Voice 2"], label="Voice")
        text = gr.Textbox(label="Text")
        generate_btn = gr.Button("Generate Audio")
        output = gr.Textbox(label="Output")
        generate_btn.click(
            text_to_speech,
            inputs=[model, language, voice, text],
            outputs=output,
        )

    with gr.Tab("Deepfake"):
        model = gr.Dropdown(["plurkface", "deeplabface"], label="Model")
        image = gr.Image(label="Upload Image or Video")
        face = gr.Image(label="Upload Another Face")
        generate_btn = gr.Button("Generate Deepfake")
        output = gr.Textbox(label="Output")
        generate_btn.click(deepfake, inputs=[model, image, face], outputs=output)

# Launch only when run as a script so the module can be imported
# (e.g. by tests or a hosting wrapper) without starting the server.
if __name__ == "__main__":
    app.launch()