# app.py — Interactive Prompt Learning Tool (Gradio demo)
import gradio as gr
from transformers import pipeline
# Load a language model for analyzing text quality
# NOTE(review): nlp_model is never referenced anywhere in this file — the
# actual analysis below is a plain word-count heuristic. Loading
# roberta-large-mnli is an expensive (multi-GB download) dead side effect;
# either wire it into analyze_prompt or remove this line. TODO confirm no
# external module imports nlp_model before deleting.
nlp_model = pipeline("text-classification", model="roberta-large-mnli")
# Function to analyze prompt
def analyze_prompt(prompt):
    """Score a text-to-image prompt and suggest improved variants.

    The score is a simple word-count heuristic: one point per ten words,
    clamped to the 1-10 range (no NLP model is consulted).

    Parameters
    ----------
    prompt : str
        The user's image-generation prompt.

    Returns
    -------
    tuple[float, str, str]
        (score clamped to 1-10, fixed feedback sentence, newline-joined
        improved prompt suggestions).
    """
    # Simple analysis by length; empty/short prompts clamp to the minimum.
    score = len(prompt.split()) / 10
    feedback = "Try to use more descriptive words and specific details."
    # Generate alternative prompts by appending common style modifiers.
    improved_prompts = [
        prompt + " with cinematic lighting",
        prompt + " ultra-realistic style",
        prompt + " high detail, sharp focus",
    ]
    # Limit score to a 1-10 scale
    score = min(max(score, 1), 10)
    # Fix: join into one string so the 3-line gr.Textbox output shows one
    # suggestion per line instead of rendering the Python list repr.
    return score, feedback, "\n".join(improved_prompts)
# Gradio Interface
def main_interface():
    """Assemble the Gradio Blocks UI for the prompt tool and launch it.

    Wires a single text input through `analyze_prompt` into three outputs:
    a numeric quality score, a feedback message, and suggested rewrites.
    Blocks until the Gradio server is stopped.
    """
    with gr.Blocks() as app:
        gr.Markdown("# Interactive Prompt Learning Tool")

        # Input and trigger.
        user_prompt = gr.Textbox(label="Enter your prompt for AI image generation")
        analyze_btn = gr.Button("Analyze Prompt")

        # Score and feedback side by side; suggestions below, full width.
        with gr.Row():
            quality_score = gr.Number(label="Prompt Quality Score (1-10)")
            improvement_tips = gr.Textbox(label="Feedback for Improvement")
        suggestions = gr.Textbox(label="Improved Prompt Suggestions", lines=3)

        analyze_btn.click(
            analyze_prompt,
            inputs=user_prompt,
            outputs=[quality_score, improvement_tips, suggestions],
        )

    app.launch()
# Launch the app
# Standard script guard: build and serve the UI only when run directly,
# not when this module is imported.
if __name__ == "__main__":
    main_interface()