minakshi.mathpal committed on
Commit
a72d5ef
·
1 Parent(s): 3212edf

refactored app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -8
app.py CHANGED
@@ -3,9 +3,7 @@ import torch
3
  import random
4
  import time
5
  from PIL import Image
6
- from utils import StableDiffusionConfig, StableDiffusionModels, generate_image
7
-
8
- # Set page config
9
  st.set_page_config(
10
  page_title="Butterfly Color Diffusion",
11
  page_icon="🦋",
@@ -32,10 +30,11 @@ def load_models():
32
  max_length=77
33
  )
34
  models = StableDiffusionModels(config)
 
35
  with st.spinner("Loading Stable Diffusion models... This may take a minute."):
36
  models.load_models()
37
  models.set_timesteps()
38
- return models, config
39
 
40
  # Title and description
41
  st.title("🦋 Butterfly Color Diffusion")
@@ -55,6 +54,32 @@ prompt = st.sidebar.text_area(
55
  height=100
56
  )
57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  steps = st.sidebar.slider("Inference Steps", min_value=10, max_value=100, value=30, step=1)
59
  guidance_scale = st.sidebar.slider("Guidance Scale", min_value=1.0, max_value=15.0, value=7.5, step=0.1)
60
  seed = st.sidebar.number_input("Seed (0 for random)", min_value=0, max_value=1000000, value=0, step=1)
@@ -79,7 +104,7 @@ with col2:
79
  # Load models when needed
80
  if standard_button or color_button:
81
  if st.session_state.models is None:
82
- st.session_state.models, st.session_state.config = load_models()
83
 
84
  # Update config with current settings
85
  st.session_state.config.num_inference_steps = steps
@@ -98,17 +123,23 @@ if standard_button:
98
  progress_bar = st.progress(0)
99
  start_time = time.time()
100
 
101
- image = generate_image(
102
  models=st.session_state.models,
103
  config=st.session_state.config,
 
104
  prompt=prompt,
 
105
  blue_loss_scale=0,
106
  yellow_loss_scale=0,
 
107
  progress_bar=progress_bar
108
  )
109
 
110
  end_time = time.time()
111
- st.image(image, caption="Standard Stable Diffusion", use_column_width=True)
 
 
 
112
  st.write(f"Generation time: {end_time - start_time:.2f} seconds")
113
 
114
  # Generate color-guided image
@@ -122,6 +153,7 @@ if color_button:
122
  models=st.session_state.models,
123
  config=st.session_state.config,
124
  prompt=prompt,
 
125
  blue_loss_scale=0,
126
  yellow_loss_scale=yellow_strength,
127
  guidance_interval=guidance_interval,
@@ -129,7 +161,10 @@ if color_button:
129
  )
130
 
131
  end_time = time.time()
132
- st.image(image, caption="Color-Guided Stable Diffusion", use_column_width=True)
 
 
 
133
  st.write(f"Generation time: {end_time - start_time:.2f} seconds")
134
 
135
  # Explanation section
@@ -146,6 +181,9 @@ The color-guided approach adds a custom loss function during the diffusion proce
146
 
147
  This combination creates a yellow tone in the final image. The strength parameter controls how strongly this color guidance affects the generation process.
148
 
 
 
 
149
  ### Technical Details
150
  During each step of the diffusion process, we:
151
  1. Calculate the predicted image at that step
 
3
  import random
4
  import time
5
  from PIL import Image
6
+ from custom_stable_diffusion import StableDiffusionConfig, StableDiffusionModels,ImageProcessor, generate_with_multiple_concepts,generate_with_concept_and_color,
 
 
7
  st.set_page_config(
8
  page_title="Butterfly Color Diffusion",
9
  page_icon="🦋",
 
30
  max_length=77
31
  )
32
  models = StableDiffusionModels(config)
33
+ image_processor = ImageProcessor(models, config)
34
  with st.spinner("Loading Stable Diffusion models... This may take a minute."):
35
  models.load_models()
36
  models.set_timesteps()
37
+ return models, config, image_processor
38
 
39
  # Title and description
40
  st.title("🦋 Butterfly Color Diffusion")
 
54
  height=100
55
  )
56
 
57
+ # Add concept selection dropdown
58
+ available_concepts = [
59
+ "None (No concept)",
60
+ "concept-art-2-1",
61
+ "canna-lily-flowers102",
62
+ "arcane-style-jv",
63
+ "seismic-image",
64
+ "azalea-flowers102",
65
+ "photographic",
66
+ "realistic",
67
+ "detailed",
68
+ "national-geographic",
69
+ "macro-photography",
70
+ "nature-photography"
71
+ ]
72
+
73
+ selected_concept = st.sidebar.selectbox(
74
+ "Select Concept Style",
75
+ available_concepts,
76
+ index=0,
77
+ help="Choose a concept style to apply to your image. Select 'None' to use standard generation."
78
+ )
79
+
80
+ # Convert "None" selection to actual None value
81
+ concept_name = None if selected_concept == "None (No concept)" else selected_concept
82
+
83
  steps = st.sidebar.slider("Inference Steps", min_value=10, max_value=100, value=30, step=1)
84
  guidance_scale = st.sidebar.slider("Guidance Scale", min_value=1.0, max_value=15.0, value=7.5, step=0.1)
85
  seed = st.sidebar.number_input("Seed (0 for random)", min_value=0, max_value=1000000, value=0, step=1)
 
104
  # Load models when needed
105
  if standard_button or color_button:
106
  if st.session_state.models is None:
107
+ st.session_state.models, st.session_state.config ,st.session_state.image_processor= load_models()
108
 
109
  # Update config with current settings
110
  st.session_state.config.num_inference_steps = steps
 
123
  progress_bar = st.progress(0)
124
  start_time = time.time()
125
 
126
+ image = generate_with_multiple_concepts(
127
  models=st.session_state.models,
128
  config=st.session_state.config,
129
+ image_processor=st.session_state.image_processor,
130
  prompt=prompt,
131
+ concept_name=concept_name, # Pass the selected concept
132
  blue_loss_scale=0,
133
  yellow_loss_scale=0,
134
+ guidance_interval=guidance_interval,
135
  progress_bar=progress_bar
136
  )
137
 
138
  end_time = time.time()
139
+ caption = f"Standard Stable Diffusion"
140
+ if concept_name:
141
+ caption += f" with {concept_name} concept"
142
+ st.image(image, caption=caption, use_column_width=True)
143
  st.write(f"Generation time: {end_time - start_time:.2f} seconds")
144
 
145
  # Generate color-guided image
 
153
  models=st.session_state.models,
154
  config=st.session_state.config,
155
  prompt=prompt,
156
+ concept_name=concept_name, # Pass the selected concept
157
  blue_loss_scale=0,
158
  yellow_loss_scale=yellow_strength,
159
  guidance_interval=guidance_interval,
 
161
  )
162
 
163
  end_time = time.time()
164
+ caption = f"Color-Guided Stable Diffusion"
165
+ if concept_name:
166
+ caption += f" with {concept_name} concept"
167
+ st.image(image, caption=caption, use_column_width=True)
168
  st.write(f"Generation time: {end_time - start_time:.2f} seconds")
169
 
170
  # Explanation section
 
181
 
182
  This combination creates a yellow tone in the final image. The strength parameter controls how strongly this color guidance affects the generation process.
183
 
184
+ ### Concept Styles
185
+ The concept styles use textual inversion embeddings to guide the image generation toward a particular artistic style or subject matter. These concepts have been trained on specific images and can dramatically change the look of your generated images.
186
+
187
  ### Technical Details
188
  During each step of the diffusion process, we:
189
  1. Calculate the predicted image at that step