| import gradio as gr |
| from transformers import pipeline |
|
|
# NOTE(review): this model is downloaded and loaded at import time but is never
# used anywhere in this file — analyze_prompt relies on a word-count heuristic
# only. Confirm whether an NLP-based scorer was planned; otherwise this line
# adds significant startup cost for no benefit.
nlp_model = pipeline("text-classification", model="roberta-large-mnli")
|
|
def analyze_prompt(prompt):
    """Heuristically score a prompt and suggest enriched variants.

    Args:
        prompt: The user's image-generation prompt text.

    Returns:
        A ``(score, feedback, suggestions)`` tuple where ``score`` is a
        float clamped to the range [1, 10] (one point per ten words),
        ``feedback`` is a short advice string, and ``suggestions`` is a
        newline-joined string of three enriched prompts. The suggestions
        are returned as a single string (not a list) so they render
        correctly in the ``gr.Textbox`` output component.
    """
    # Crude quality heuristic: longer, more detailed prompts score higher.
    # Clamp immediately so callers always see a value in [1, 10].
    score = min(max(len(prompt.split()) / 10, 1), 10)

    feedback = "Try to use more descriptive words and specific details."

    # Append common quality-boosting modifiers to form the suggestions.
    suffixes = (
        " with cinematic lighting",
        " ultra-realistic style",
        " high detail, sharp focus",
    )
    # Fix: join into one string — gr.Textbox cannot display a Python list.
    suggestions = "\n".join(prompt + suffix for suffix in suffixes)

    return score, feedback, suggestions
|
|
| |
def main_interface():
    """Build the prompt-learning UI and start the Gradio app.

    Lays out a prompt input, an analyze button, a score/feedback row, and
    a suggestions box, wires the button to ``analyze_prompt``, then blocks
    on ``demo.launch()`` to serve the interface.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# Interactive Prompt Learning Tool")

        user_prompt = gr.Textbox(label="Enter your prompt for AI image generation")
        analyze_btn = gr.Button("Analyze Prompt")

        # Score and feedback sit side by side; suggestions get a full row.
        with gr.Row():
            quality_score = gr.Number(label="Prompt Quality Score (1-10)")
            advice_box = gr.Textbox(label="Feedback for Improvement")

        suggestions_box = gr.Textbox(label="Improved Prompt Suggestions", lines=3)

        analyze_btn.click(
            analyze_prompt,
            inputs=user_prompt,
            outputs=[quality_score, advice_box, suggestions_box],
        )

    demo.launch()
|
|
| |
# Script entry point: build the UI and start the Gradio server when run directly.
if __name__ == "__main__":
    main_interface()
|
|