mubaraknumann commited on
Commit
8cd150e
·
verified ·
1 Parent(s): 091fae7

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +129 -44
src/streamlit_app.py CHANGED
@@ -1,4 +1,10 @@
1
  import os
 
 
 
 
 
 
2
  os.environ["HOME"] = os.getcwd()
3
 
4
  import streamlit as st
@@ -30,13 +36,16 @@ class RepVGGBlock(layers.Layer):
30
  groups=self.config_groups, use_bias=False, name=self.name + '_dense_conv'
31
  )
32
  self.rbr_dense_bn = layers.BatchNormalization(name=self.name + '_dense_bn')
 
33
  self.rbr_1x1_conv = layers.Conv2D(
34
  filters=self.config_out_channels, kernel_size=1,
35
  strides=self.config_strides_val, padding='valid',
36
  groups=self.config_groups, use_bias=False, name=self.name + '_1x1_conv'
37
  )
38
  self.rbr_1x1_bn = layers.BatchNormalization(name=self.name + '_1x1_bn')
39
- self.rbr_identity_bn = None
 
 
40
  self.rbr_reparam = layers.Conv2D(
41
  filters=self.config_out_channels, kernel_size=self.config_kernel_size,
42
  strides=self.config_strides_val, padding='same',
@@ -50,12 +59,16 @@ class RepVGGBlock(layers.Layer):
50
  elif self.config_initial_in_channels != self.actual_in_channels:
51
  raise ValueError(f"Input channel mismatch for layer {self.name}: Expected {self.config_initial_in_channels}, got {self.actual_in_channels}")
52
 
 
53
  if self.rbr_identity_bn is None and \
54
  self.actual_in_channels == self.config_out_channels and self.config_strides_val == 1:
55
  self.rbr_identity_bn = layers.BatchNormalization(name=self.name + '_identity_bn')
56
-
 
57
  super(RepVGGBlock, self).build(input_shape)
58
 
 
 
59
  if not self.rbr_dense_conv.built: self.rbr_dense_conv.build(input_shape)
60
  if not self.rbr_dense_bn.built: self.rbr_dense_bn.build(self.rbr_dense_conv.compute_output_shape(input_shape))
61
  if not self.rbr_1x1_conv.built: self.rbr_1x1_conv.build(input_shape)
@@ -74,82 +87,157 @@ class RepVGGBlock(layers.Layer):
74
  if self.rbr_identity_bn is not None:
75
  out_identity = self.rbr_identity_bn(inputs)
76
  return out_dense + out_1x1 + out_identity
77
- else: return out_dense + out_1x1
 
 
 
 
 
 
 
78
 
79
- def _fuse_bn_tensor(self, conv_layer, bn_layer): # Not called during inference with deploy=True model
80
- kernel = conv_layer.kernel; dtype = kernel.dtype; out_channels = kernel.shape[-1]
81
  gamma = getattr(bn_layer, 'gamma', tf.ones(out_channels, dtype=dtype))
82
  beta = getattr(bn_layer, 'beta', tf.zeros(out_channels, dtype=dtype))
83
  running_mean = getattr(bn_layer, 'moving_mean', tf.zeros(out_channels, dtype=dtype))
84
  running_var = getattr(bn_layer, 'moving_variance', tf.ones(out_channels, dtype=dtype))
85
- epsilon = bn_layer.epsilon; std = tf.sqrt(running_var + epsilon)
 
 
86
  fused_kernel = kernel * (gamma / std)
87
- if conv_layer.use_bias: fused_bias = beta + (gamma * (conv_layer.bias - running_mean)) / std
88
- else: fused_bias = beta - (running_mean * gamma) / std
 
 
89
  return fused_kernel, fused_bias
90
 
91
- def reparameterize(self): # Not called during inference with deploy=True model
92
- if self._deploy_mode_internal: return
 
 
 
93
  branches_to_check = [self.rbr_dense_conv, self.rbr_dense_bn, self.rbr_1x1_conv, self.rbr_1x1_bn]
94
- if self.rbr_identity_bn: branches_to_check.append(self.rbr_identity_bn)
 
 
95
  for branch_layer in branches_to_check:
96
- if not branch_layer.built: raise Exception(f"ERROR: Branch layer {branch_layer.name} for {self.name} not built.")
 
 
97
  kernel_dense, bias_dense = self._fuse_bn_tensor(self.rbr_dense_conv, self.rbr_dense_bn)
98
  kernel_1x1_unpadded, bias_1x1 = self._fuse_bn_tensor(self.rbr_1x1_conv, self.rbr_1x1_bn)
 
99
  pad_amount = self.config_kernel_size // 2
100
  kernel_1x1_padded = tf.pad(kernel_1x1_unpadded, [[pad_amount,pad_amount],[pad_amount,pad_amount],[0,0],[0,0]])
101
- final_kernel = kernel_dense + kernel_1x1_padded; final_bias = bias_dense + bias_1x1
 
 
 
102
  if self.rbr_identity_bn is not None:
103
- running_mean_id = self.rbr_identity_bn.moving_mean; running_var_id = self.rbr_identity_bn.moving_variance
104
- gamma_id = self.rbr_identity_bn.gamma; beta_id = self.rbr_identity_bn.beta
105
- epsilon_id = self.rbr_identity_bn.epsilon; std_id = tf.sqrt(running_var_id + epsilon_id)
 
 
 
 
106
  kernel_id_scaler = gamma_id / std_id
107
  bias_id_term = beta_id - (running_mean_id * gamma_id) / std_id
108
- identity_kernel_np = np.zeros((self.config_kernel_size,self.config_kernel_size,self.actual_in_channels,self.config_out_channels),dtype=np.float32)
109
- for i in range(self.actual_in_channels): identity_kernel_np[pad_amount,pad_amount,i,i] = kernel_id_scaler[i].numpy()
 
 
 
 
 
 
110
  kernel_id_final = tf.convert_to_tensor(identity_kernel_np, dtype=tf.float32)
111
- final_kernel += kernel_id_final; final_bias += bias_id_term
112
- if not self.rbr_reparam.built: raise Exception(f"CRITICAL ERROR: {self.rbr_reparam.name} not built before set_weights.")
113
- self.rbr_reparam.set_weights([final_kernel, final_bias]); self._deploy_mode_internal = True
 
 
 
 
 
 
114
 
115
  def get_config(self):
116
  config = super(RepVGGBlock, self).get_config()
117
  config.update({
118
- "in_channels": self.config_initial_in_channels, "out_channels": self.config_out_channels,
119
- "kernel_size": self.config_kernel_size, "stride": self.config_strides_val,
120
- "groups": self.config_groups, "deploy": self._deploy_mode_internal, "use_se": self.config_use_se
121
- }); return config
 
 
 
 
 
 
122
  @classmethod
123
- def from_config(cls, config): return cls(**config)
 
 
124
  # --- End of RepVGGBlock ---
125
 
126
  # --- NECALayer Class Definition (Verified Version) ---
127
  class NECALayer(layers.Layer):
128
  def __init__(self, channels, gamma=2, b=1, **kwargs):
129
  super(NECALayer, self).__init__(**kwargs)
130
- self.channels = channels; self.gamma = gamma; self.b = b
 
 
 
131
  tf_channels = tf.cast(self.channels, tf.float32)
132
  k_float = (tf.math.log(tf_channels) / tf.math.log(2.0) + self.b) / self.gamma
133
  k_int = tf.cast(tf.round(k_float), tf.int32)
134
- if tf.equal(k_int % 2, 0): self.k_scalar_val = k_int + 1
135
- else: self.k_scalar_val = k_int
136
- self.k_scalar_val = tf.maximum(1, self.k_scalar_val)
 
 
 
 
 
137
  kernel_size_for_conv1d = (int(self.k_scalar_val.numpy()),)
 
138
  self.gap = layers.GlobalAveragePooling2D(keepdims=True)
139
- self.conv1d = layers.Conv1D(filters=1, kernel_size=kernel_size_for_conv1d, padding='same', use_bias=False, name=self.name + '_eca_conv1d')
 
 
 
140
  self.sigmoid = layers.Activation('sigmoid')
 
141
  def call(self, inputs):
142
- if self.channels != inputs.shape[-1]: raise ValueError(f"Input channels {inputs.shape[-1]} != layer channels {self.channels} for {self.name}")
143
- x = self.gap(inputs); x = tf.squeeze(x, axis=[1,2]); x = tf.expand_dims(x, axis=-1)
144
- x = self.conv1d(x); x = tf.squeeze(x, axis=-1); attention = self.sigmoid(x)
 
 
 
 
 
 
 
 
 
 
145
  return inputs * tf.reshape(attention, [-1, 1, 1, self.channels])
 
146
  def get_config(self):
147
  config = super(NECALayer, self).get_config()
148
- config.update({"channels": self.channels, "gamma": self.gamma, "b": self.b}); return config
 
 
 
 
 
 
149
  @classmethod
150
- def from_config(cls, config): return cls(**config)
151
- # --- End of NECALayer ---
152
 
 
153
 
154
  # --- Streamlit App Configuration ---
155
  MODEL_FILENAME = 'genera_cic_v1.keras'
@@ -160,8 +248,7 @@ IMG_HEIGHT = 299
160
  st.set_page_config(page_title="Genera Cloud Classifier", layout="wide")
161
 
162
  # --- Load Model and Label Mapping (Cached for performance) ---
163
- @st.cache_resource
164
- def load_keras_model(model_path):
165
  """Loads the Keras model with custom layer definitions."""
166
  if not os.path.exists(model_path):
167
  st.error(f"Model file not found: {model_path}")
@@ -217,15 +304,14 @@ if model is None or int_to_label is None:
217
  st.error("Application cannot start due to errors loading model or label mapping. Please check the console/logs for details.")
218
  else:
219
  uploaded_file = st.file_uploader("Choose a cloud image...", type=["jpg", "jpeg", "png"])
220
-
221
  if uploaded_file is not None:
222
  try:
223
  image_pil = Image.open(uploaded_file)
224
-
225
  col1, col2 = st.columns(2)
226
  with col1:
227
  st.image(image_pil, caption='Uploaded Image.', use_container_width=True)
228
-
229
  # Preprocess and predict
230
  with st.spinner('Analyzing the sky...'):
231
  processed_image_tensor = preprocess_for_prediction(image_pil)
@@ -238,7 +324,6 @@ else:
238
  top_n = 5 # Show top 5 predictions
239
  # Get indices of sorted probabilities (highest first)
240
  sorted_indices = np.argsort(pred_probabilities)[::-1]
241
-
242
  for i in range(min(top_n, len(pred_probabilities))):
243
  class_index = sorted_indices[i]
244
  class_name = int_to_label.get(class_index, f"Unknown Class ({class_index})")
@@ -261,4 +346,4 @@ st.markdown("---")
261
  st.markdown("Developed as part of the Personalized Weather Intelligence project.")
262
 
263
  print("Current working directory:", os.getcwd())
264
- print("Files in current directory:", os.listdir())
 
1
import os

# IMPORTANT: Set this environment variable BEFORE any Streamlit imports
# This prevents Streamlit from trying to write to /.streamlit for usage stats,
# which often causes PermissionError in sandboxed environments like Hugging Face Spaces.
os.environ["STREAMLIT_SERVER_BROWSER_GATHER_USAGE_STATS"] = "false"

# Ensure HOME is set to the current working directory for other potential uses
# (e.g. libraries that write cache/config files under $HOME).
os.environ["HOME"] = os.getcwd()

import streamlit as st
 
36
  groups=self.config_groups, use_bias=False, name=self.name + '_dense_conv'
37
  )
38
  self.rbr_dense_bn = layers.BatchNormalization(name=self.name + '_dense_bn')
39
+
40
  self.rbr_1x1_conv = layers.Conv2D(
41
  filters=self.config_out_channels, kernel_size=1,
42
  strides=self.config_strides_val, padding='valid',
43
  groups=self.config_groups, use_bias=False, name=self.name + '_1x1_conv'
44
  )
45
  self.rbr_1x1_bn = layers.BatchNormalization(name=self.name + '_1x1_bn')
46
+
47
+ self.rbr_identity_bn = None # Will be initialized in build if conditions met
48
+
49
  self.rbr_reparam = layers.Conv2D(
50
  filters=self.config_out_channels, kernel_size=self.config_kernel_size,
51
  strides=self.config_strides_val, padding='same',
 
59
  elif self.config_initial_in_channels != self.actual_in_channels:
60
  raise ValueError(f"Input channel mismatch for layer {self.name}: Expected {self.config_initial_in_channels}, got {self.actual_in_channels}")
61
 
62
+ # Initialize identity branch BN if conditions are met
63
  if self.rbr_identity_bn is None and \
64
  self.actual_in_channels == self.config_out_channels and self.config_strides_val == 1:
65
  self.rbr_identity_bn = layers.BatchNormalization(name=self.name + '_identity_bn')
66
+
67
+ # Call super().build() after all attributes are potentially set
68
  super(RepVGGBlock, self).build(input_shape)
69
 
70
+ # Explicitly build sub-layers if they haven't been built by the first call to call()
71
+ # This can be important for reparameterization before the first call
72
  if not self.rbr_dense_conv.built: self.rbr_dense_conv.build(input_shape)
73
  if not self.rbr_dense_bn.built: self.rbr_dense_bn.build(self.rbr_dense_conv.compute_output_shape(input_shape))
74
  if not self.rbr_1x1_conv.built: self.rbr_1x1_conv.build(input_shape)
 
87
  if self.rbr_identity_bn is not None:
88
  out_identity = self.rbr_identity_bn(inputs)
89
  return out_dense + out_1x1 + out_identity
90
+ else:
91
+ return out_dense + out_1x1
92
+
93
def _fuse_bn_tensor(self, conv_layer, bn_layer):
    """Fold a BatchNormalization layer into the preceding conv's weights.

    Returns a ``(fused_kernel, fused_bias)`` pair such that applying the
    fused conv is equivalent to ``bn_layer(conv_layer(x))`` at inference
    time. Used only during reparameterization; not called when the model
    is already in deploy mode.

    Args:
        conv_layer: Conv layer whose ``kernel`` (and optional ``bias``)
            is to be fused. Output channels are taken from the kernel's
            last axis.
        bn_layer: BatchNormalization layer providing gamma/beta and the
            moving statistics. Missing attributes (e.g. when the BN was
            built without scale/center) fall back to identity values.
    """
    kernel = conv_layer.kernel
    dtype = kernel.dtype
    out_channels = kernel.shape[-1]

    # getattr fallbacks give BN-identity behaviour if a parameter is absent.
    gamma = getattr(bn_layer, 'gamma', tf.ones(out_channels, dtype=dtype))
    beta = getattr(bn_layer, 'beta', tf.zeros(out_channels, dtype=dtype))
    running_mean = getattr(bn_layer, 'moving_mean', tf.zeros(out_channels, dtype=dtype))
    running_var = getattr(bn_layer, 'moving_variance', tf.ones(out_channels, dtype=dtype))
    epsilon = bn_layer.epsilon
    std = tf.sqrt(running_var + epsilon)

    # Standard conv+BN fusion: w' = w * gamma/std, b' = beta + gamma*(b - mean)/std.
    fused_kernel = kernel * (gamma / std)
    if conv_layer.use_bias:
        fused_bias = beta + (gamma * (conv_layer.bias - running_mean)) / std
    else:
        fused_bias = beta - (running_mean * gamma) / std
    return fused_kernel, fused_bias
112
 
113
def reparameterize(self):
    """Fuse the multi-branch training structure into the single reparam conv.

    Combines the dense KxK branch, the (zero-padded) 1x1 branch, and the
    optional identity-BN branch into one kernel/bias pair, writes them
    into ``self.rbr_reparam`` via ``set_weights`` and flips the internal
    deploy flag. No-op if the block is already in deploy mode. Not called
    during inference with a deploy=True model.

    Raises:
        Exception: if any source branch layer, or ``rbr_reparam`` itself,
            has not been built yet.
    """
    if self._deploy_mode_internal:
        return

    branches_to_check = [self.rbr_dense_conv, self.rbr_dense_bn, self.rbr_1x1_conv, self.rbr_1x1_bn]
    if self.rbr_identity_bn:
        branches_to_check.append(self.rbr_identity_bn)

    for branch_layer in branches_to_check:
        if not branch_layer.built:
            raise Exception(f"ERROR: Branch layer {branch_layer.name} for {self.name} not built.")

    kernel_dense, bias_dense = self._fuse_bn_tensor(self.rbr_dense_conv, self.rbr_dense_bn)
    kernel_1x1_unpadded, bias_1x1 = self._fuse_bn_tensor(self.rbr_1x1_conv, self.rbr_1x1_bn)

    # Zero-pad the 1x1 kernel spatially so it can be summed with the KxK kernel.
    pad_amount = self.config_kernel_size // 2
    kernel_1x1_padded = tf.pad(kernel_1x1_unpadded, [[pad_amount,pad_amount],[pad_amount,pad_amount],[0,0],[0,0]])

    final_kernel = kernel_dense + kernel_1x1_padded
    final_bias = bias_dense + bias_1x1

    if self.rbr_identity_bn is not None:
        running_mean_id = self.rbr_identity_bn.moving_mean
        running_var_id = self.rbr_identity_bn.moving_variance
        gamma_id = self.rbr_identity_bn.gamma
        beta_id = self.rbr_identity_bn.beta
        epsilon_id = self.rbr_identity_bn.epsilon
        std_id = tf.sqrt(running_var_id + epsilon_id)

        kernel_id_scaler = gamma_id / std_id
        bias_id_term = beta_id - (running_mean_id * gamma_id) / std_id

        # Build an identity kernel: center tap of channel i -> channel i,
        # scaled by the fused BN factor. Shape: (K, K, in, out).
        identity_kernel_np = np.zeros(
            (self.config_kernel_size, self.config_kernel_size, self.actual_in_channels, self.config_out_channels),
            dtype=np.float32
        )
        for i in range(self.actual_in_channels):
            identity_kernel_np[pad_amount, pad_amount, i, i] = kernel_id_scaler[i].numpy()
        kernel_id_final = tf.convert_to_tensor(identity_kernel_np, dtype=tf.float32)

        final_kernel += kernel_id_final
        final_bias += bias_id_term

    if not self.rbr_reparam.built:
        raise Exception(f"CRITICAL ERROR: {self.rbr_reparam.name} not built before set_weights.")

    self.rbr_reparam.set_weights([final_kernel, final_bias])
    self._deploy_mode_internal = True
163
 
164
def get_config(self):
    """Return the serialization config for this block.

    Keys mirror the constructor arguments so the layer round-trips through
    ``from_config``; ``deploy`` reflects the current internal deploy state.
    """
    config = super(RepVGGBlock, self).get_config()
    config.update({
        "in_channels": self.config_initial_in_channels,
        "out_channels": self.config_out_channels,
        "kernel_size": self.config_kernel_size,
        "stride": self.config_strides_val,
        "groups": self.config_groups,
        "deploy": self._deploy_mode_internal,
        "use_se": self.config_use_se
    })
    return config
176
+
177
@classmethod
def from_config(cls, config):
    """Recreate the layer from a ``get_config`` dictionary."""
    return cls(**config)
180
+
181
  # --- End of RepVGGBlock ---
182
 
183
  # --- NECALayer Class Definition (Verified Version) ---
184
class NECALayer(layers.Layer):
    """Efficient Channel Attention layer with adaptively sized 1-D conv.

    The Conv1D kernel size k is derived from the channel count as
    round((log2(C) + b) / gamma), forced odd and >= 1, following the
    ECA-Net formulation. The layer computes a per-channel attention
    vector via GAP -> Conv1D -> sigmoid and rescales the input by it.
    """

    def __init__(self, channels, gamma=2, b=1, **kwargs):
        super(NECALayer, self).__init__(**kwargs)
        self.channels = channels
        self.gamma = gamma
        self.b = b

        # k = round((log2(C) + b) / gamma), made odd so the Conv1D window
        # is centered, and clamped to at least 1.
        tf_channels = tf.cast(self.channels, tf.float32)
        k_float = (tf.math.log(tf_channels) / tf.math.log(2.0) + self.b) / self.gamma
        k_int = tf.cast(tf.round(k_float), tf.int32)

        if tf.equal(k_int % 2, 0):
            self.k_scalar_val = k_int + 1
        else:
            self.k_scalar_val = k_int
        self.k_scalar_val = tf.maximum(1, self.k_scalar_val)  # Ensure kernel size is at least 1

        # Convert to a Python int for Conv1D kernel_size
        kernel_size_for_conv1d = (int(self.k_scalar_val.numpy()),)

        self.gap = layers.GlobalAveragePooling2D(keepdims=True)
        self.conv1d = layers.Conv1D(
            filters=1, kernel_size=kernel_size_for_conv1d, padding='same', use_bias=False,
            name=self.name + '_eca_conv1d'
        )
        self.sigmoid = layers.Activation('sigmoid')

    def call(self, inputs):
        """Apply channel attention; input is NHWC with C == self.channels."""
        # Ensure input channels match the layer's expected channels
        if self.channels != inputs.shape[-1]:
            raise ValueError(f"Input channels {inputs.shape[-1]} != layer channels {self.channels} for {self.name}")

        x = self.gap(inputs)
        x = tf.squeeze(x, axis=[1, 2])   # Remove spatial dimensions
        x = tf.expand_dims(x, axis=-1)   # Add a channel dimension for Conv1D

        x = self.conv1d(x)
        x = tf.squeeze(x, axis=-1)       # Remove the Conv1D output channel dimension
        attention = self.sigmoid(x)

        # Reshape attention to (batch, 1, 1, C) for broadcast multiplication.
        return inputs * tf.reshape(attention, [-1, 1, 1, self.channels])

    def get_config(self):
        """Return the serialization config mirroring the constructor args."""
        config = super(NECALayer, self).get_config()
        config.update({
            "channels": self.channels,
            "gamma": self.gamma,
            "b": self.b
        })
        return config

    @classmethod
    def from_config(cls, config):
        """Recreate the layer from a ``get_config`` dictionary."""
        return cls(**config)
239
 
240
+ # --- End of NECALayer ---
241
 
242
  # --- Streamlit App Configuration ---
243
  MODEL_FILENAME = 'genera_cic_v1.keras'
 
248
  st.set_page_config(page_title="Genera Cloud Classifier", layout="wide")
249
 
250
  # --- Load Model and Label Mapping (Cached for performance) ---
251
+ @st.cache_resource
+ def load_keras_model(model_path):
 
252
  """Loads the Keras model with custom layer definitions."""
253
  if not os.path.exists(model_path):
254
  st.error(f"Model file not found: {model_path}")
 
304
  st.error("Application cannot start due to errors loading model or label mapping. Please check the console/logs for details.")
305
  else:
306
  uploaded_file = st.file_uploader("Choose a cloud image...", type=["jpg", "jpeg", "png"])
 
307
  if uploaded_file is not None:
308
  try:
309
  image_pil = Image.open(uploaded_file)
310
+
311
  col1, col2 = st.columns(2)
312
  with col1:
313
  st.image(image_pil, caption='Uploaded Image.', use_container_width=True)
314
+
315
  # Preprocess and predict
316
  with st.spinner('Analyzing the sky...'):
317
  processed_image_tensor = preprocess_for_prediction(image_pil)
 
324
  top_n = 5 # Show top 5 predictions
325
  # Get indices of sorted probabilities (highest first)
326
  sorted_indices = np.argsort(pred_probabilities)[::-1]
 
327
  for i in range(min(top_n, len(pred_probabilities))):
328
  class_index = sorted_indices[i]
329
  class_name = int_to_label.get(class_index, f"Unknown Class ({class_index})")
 
346
  st.markdown("Developed as part of the Personalized Weather Intelligence project.")
347
 
348
  print("Current working directory:", os.getcwd())
349
+ print("Files in current directory:", os.listdir())