zenityx committed on
Commit
6ffb7a8
·
verified ·
1 Parent(s): c2fd0f9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +161 -35
app.py CHANGED
@@ -1,48 +1,174 @@
 
 
 
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- # Load a language model for analyzing text quality
5
- nlp_model = pipeline("text-classification", model="roberta-large-mnli")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
- # Function to analyze prompt
8
 
9
- def analyze_prompt(prompt):
10
- # Simple analysis by length and keyword presence
11
- score = len(prompt.split()) / 10
12
- feedback = "Try to use more descriptive words and specific details."
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
- # Generate alternative prompts
15
- improved_prompts = [
16
- prompt + " with cinematic lighting",
17
- prompt + " ultra-realistic style",
18
- prompt + " high detail, sharp focus"
19
- ]
 
 
 
 
 
 
 
 
 
 
 
20
 
21
- # Limit score to a 1-10 scale
22
- score = min(max(score, 1), 10)
23
- return score, feedback, improved_prompts
 
24
 
25
- # Gradio Interface
26
- def main_interface():
27
- with gr.Blocks() as demo:
28
- gr.Markdown("# Interactive Prompt Learning Tool")
29
- prompt_input = gr.Textbox(label="Enter your prompt for AI image generation")
30
- submit_button = gr.Button("Analyze Prompt")
31
 
32
- with gr.Row():
33
- score_output = gr.Number(label="Prompt Quality Score (1-10)")
34
- feedback_output = gr.Textbox(label="Feedback for Improvement")
 
 
 
 
 
35
 
36
- improved_output = gr.Textbox(label="Improved Prompt Suggestions", lines=3)
 
 
 
 
 
 
 
37
 
38
- submit_button.click(
39
- analyze_prompt,
40
- inputs=prompt_input,
41
- outputs=[score_output, feedback_output, improved_output]
42
- )
43
 
44
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
- # Launch the app
47
- if __name__ == "__main__":
48
- main_interface()
 
 
1
# app.py
#
# Requires: gradio, transformers  (install with: pip install gradio transformers)
# NOTE(fix): the original `!pip install gradio transformers` line is
# IPython/Jupyter shell-magic syntax; in a plain .py module it is a
# SyntaxError and the app never starts. Dependencies must be installed
# outside the script (e.g. requirements.txt on Hugging Face Spaces).
4
+
5
import gradio as gr
from transformers import pipeline

# ---------------------------------------------------------
# 1. Setup: Load a text2text-generation model from Hugging Face
#    (You can choose another model if you prefer.)
# ---------------------------------------------------------
# Instantiated once at module import; the first run downloads the model
# weights, so startup may take a while on a fresh environment.
prompt_analyzer = pipeline("text2text-generation", model="google/flan-t5-base")
13
+
14
# ---------------------------------------------------------
# 2. Define a function to analyze the user's prompt
#    - Returns a score (1-10)
#    - Provides feedback on clarity/creativity/completeness
#    - Suggests 3 improved versions of the prompt
# ---------------------------------------------------------
def _parse_analysis(model_response: str):
    """Parse the model's free-form reply into (rating, feedback, suggestions).

    Scans the reply line by line for the "Rating:" / "Feedback:" labels and
    for numbered improvement items. Numbered items are matched generically
    ("1.", "2)", "- 3.", ...) instead of only the literal prefixes
    "1." / "2." / "3.", so slight formatting drift from the model still
    parses.

    Returns:
        (str, str, str): rating (or "N/A"), feedback (possibly empty), and
        the improvement lines joined by newlines. If no numbered item was
        found, the suggestions slot carries a fallback message plus the raw
        model reply so the user still sees something useful.
    """
    rating = "N/A"
    feedback = ""
    improvement_lines = []

    for raw_line in model_response.split("\n"):
        line = raw_line.strip()
        lowered = line.lower()
        if lowered.startswith("rating:"):
            rating = line.split(":", 1)[1].strip()
        elif lowered.startswith("feedback:"):
            feedback = line.split(":", 1)[1].strip()
        else:
            # Accept "1.", "2)", "- 3.", etc. as a numbered improvement item.
            item = line.lstrip("- ").strip()
            if item[:1].isdigit() and item[1:2] in (".", ")"):
                improvement_lines.append(line)

    if improvement_lines:
        suggestions = "\n".join(improvement_lines) + "\n"
    else:
        # Model produced no parseable numbered list: fall back to the raw
        # response rather than showing nothing.
        suggestions = "Could not parse suggestions properly.\n" + model_response

    return rating, feedback, suggestions


def analyze_prompt(user_prompt: str):
    """
    Use the text-to-text model to rate the prompt, provide feedback,
    and generate three improved prompt suggestions.

    Parameters:
        user_prompt (str): The user's original image-generation prompt.

    Returns:
        (str, str, str): A tuple containing:
            1) Score (as text, 1-10),
            2) Feedback (as a longer text),
            3) Three improved prompt suggestions (as a single multiline string).
    """
    # Guard: empty / whitespace-only input never reaches the model.
    if not user_prompt.strip():
        return "N/A", "Please enter a valid prompt.", "No suggestions available."

    # Instruction asking the model for a fixed, parseable response layout.
    instruction = f"""
You are an expert prompt engineer. Analyze the following prompt for an AI image generation model.

Prompt: {user_prompt}

1. Rate the prompt on a scale of 1 to 10 based on clarity, creativity, and completeness.
2. Provide a short explanation (feedback) for your rating.
3. Give three improved versions of the prompt to make it more descriptive or more creative.

Format your response in the following structure:

Rating: X
Feedback: <your feedback here>
Improvements:
1. <improvement 1>
2. <improvement 2>
3. <improvement 3>
"""

    # Single generation call; max_length bounds the reply size.
    model_response = prompt_analyzer(instruction, max_length=200)[0]["generated_text"]
    return _parse_analysis(model_response)
 
 
 
 
 
92
 
93
# ---------------------------------------------------------
# 3. Define some example prompts for reference
# ---------------------------------------------------------
# Shown in the dropdown so users can load a well-formed prompt as a
# starting point.
example_prompts = [
    "A majestic dragon soaring above a medieval castle, fantasy art style, highly detailed",
    "A peaceful countryside landscape with rolling hills and a small cottage at sunset",
    "A cyberpunk city scene with neon lights, flying cars, and towering skyscrapers",
]
101
 
102
# ---------------------------------------------------------
# 4. Build the Gradio interface
# ---------------------------------------------------------
def set_example_prompt(example):
    """Pass the selected example straight through; Gradio routes the return
    value into the prompt textbox (see the `load_example_btn.click` wiring)."""
    return example
110
 
111
# Layout: left column = inputs (example picker, prompt box, buttons),
# right column = read-only outputs (score, feedback, suggestions).
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Interactive Prompt Engineering App
        **Learn how to craft better prompts for AI image generation.**

        1. Enter your prompt below.
        2. Click "Evaluate Prompt" to get a **score**, **feedback**, and **3 improved prompts**.
        3. Use the dropdown to load example prompts for inspiration.
        """
    )

    with gr.Row():
        with gr.Column():
            # Dropdown to select an example; starts empty (value=None).
            example_dropdown = gr.Dropdown(
                label="Choose an example prompt to load",
                choices=example_prompts,
                value=None,
                interactive=True
            )

            # Textbox for user prompt input
            user_prompt_input = gr.Textbox(
                label="Enter your prompt here:",
                lines=4,
                placeholder="E.g. 'A futuristic cityscape with neon lights at night, highly detailed...'"
            )

            # Button to copy the selected example prompt into the textbox
            load_example_btn = gr.Button("Load Example Prompt")

            # Button to analyze the user's prompt
            analyze_btn = gr.Button("Evaluate Prompt")

        with gr.Column():
            # Output fields are display-only (interactive=False).
            score_output = gr.Textbox(
                label="Prompt Quality Score (1-10)",
                interactive=False
            )
            feedback_output = gr.Textbox(
                label="Feedback",
                lines=3,
                interactive=False
            )
            suggestions_output = gr.Textbox(
                label="Improved Prompt Suggestions",
                lines=6,
                interactive=False
            )

    # Define the interactions: dropdown -> textbox, textbox -> analysis.
    load_example_btn.click(fn=set_example_prompt,
                           inputs=[example_dropdown],
                           outputs=[user_prompt_input])

    analyze_btn.click(fn=analyze_prompt,
                      inputs=[user_prompt_input],
                      outputs=[score_output, feedback_output, suggestions_output])
170
 
171
# ---------------------------------------------------------
# 5. Launch the Gradio app
# ---------------------------------------------------------
# Starts the web server; on Hugging Face Spaces this runs at module import.
demo.launch()