valegro committed on
Commit
01876de
·
verified ·
1 Parent(s): e55f7a4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +354 -271
app.py CHANGED
@@ -1,285 +1,368 @@
1
  import streamlit as st
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import torch
3
  import torch.nn as nn
4
  import torch.nn.functional as F
5
- import numpy as np
6
- import pandas as pd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
- from torch.utils.data import DataLoader, TensorDataset
9
- import plotly.graph_objects as go
 
10
 
11
- # ===============================
12
- # DESTINAZIONI (4 CLASSI)
13
- # ===============================
14
- DESTINAZIONI = ["ArmadioAnta","DockRigenerato","OggettoVetrina","ToolEducativo"]
15
 
16
- # ===============================
17
- # MLP AVANZATA
18
- # ===============================
19
- class AdvancedMLP(nn.Module):
20
- def __init__(self, input_dim=10, hidden1=32, hidden2=32, num_classes=4):
21
- super().__init__()
22
- self.bn_in = nn.BatchNorm1d(input_dim)
23
- self.fc1 = nn.Linear(input_dim, hidden1)
24
- self.bn1 = nn.BatchNorm1d(hidden1)
25
- self.drop1 = nn.Dropout(0.2)
26
- self.fc2 = nn.Linear(hidden1, hidden2)
27
- self.bn2 = nn.BatchNorm1d(hidden2)
28
- self.drop2 = nn.Dropout(0.2)
29
- self.out = nn.Linear(hidden2, num_classes)
30
 
31
  def forward(self, x):
32
- x = self.bn_in(x)
33
- x = F.relu(self.fc1(x))
34
- x = self.bn1(x)
35
- x = self.drop1(x)
36
- x = F.relu(self.fc2(x))
37
- x = self.bn2(x)
38
- x = self.drop2(x)
39
- logits = self.out(x)
40
- return logits # shape [batch, num_classes]
41
-
42
- # ===============================
43
- # STREAMLIT
44
- # ===============================
45
- st.set_page_config(page_title="Weeko - Advanced AI", layout="wide")
46
- st.title("Weeko - Modello Avanzato per Compatibilità EoL (OR6.2/OR6.3)")
47
-
48
- # Session state
49
- if "model" not in st.session_state:
50
- st.session_state["model"] = AdvancedMLP()
51
- if "trained" not in st.session_state:
52
- st.session_state["trained"] = False
53
- if "X_val" not in st.session_state:
54
- st.session_state["X_val"] = None
55
- if "y_val" not in st.session_state:
56
- st.session_state["y_val"] = None
57
-
58
- # Funzione di train
59
- def train_model(dataset, epochs=20, batch_size=32):
60
- model = st.session_state["model"]
61
- def init_weights(m):
62
- if isinstance(m, nn.Linear):
63
- nn.init.xavier_uniform_(m.weight)
64
- nn.init.zeros_(m.bias)
65
- model.apply(init_weights)
66
-
67
- optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
68
- lossf = nn.CrossEntropyLoss()
69
-
70
- loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
71
- for ep in range(epochs):
72
- total_loss=0
73
- for (xb, yb) in loader:
74
- optimizer.zero_grad()
75
- logits = model(xb)
76
- loss = lossf(logits, yb)
77
- loss.backward()
78
- optimizer.step()
79
- total_loss+=loss.item()
80
-
81
- st.session_state["trained"] = True
82
-
83
- # ============= TABS =============
84
- tabs = st.tabs(["1) Genera Dataset Avanzato","2) Train & Val","3) Valuta Nuovo EoL"])
85
-
86
- # ---------------- TAB 1 ----------------
87
- with tabs[0]:
88
- st.subheader("Genera un dataset fittizio con 10 feature + 4 destinazioni")
89
-
90
- st.write("""
91
- **Feature** possibili:
92
- 1. volume (cm³)
93
- 2. area (cm²)
94
- 3. lunghezza (mm)
95
- 4. spessore (mm)
96
- 5. usura (0..1)
97
- 6. rigidità (0..1)
98
- 7. shape_complexity (0..1)
99
- 8. fori_count (numero fori)
100
- 9. estetica_rating (0..1)
101
- 10. materiale_code (0..3)
102
-
103
- **Destinazione**: 4 classi (ArmadioAnta, DockRigenerato, OggettoVetrina, ToolEducativo)
104
- """)
105
-
106
- n = st.number_input("Quanti campioni fittizi generare?", 100,20000, 1000)
107
- if st.button("Genera dataset & Memorizza in session"):
108
- rng = np.random.default_rng(42)
109
- volume = rng.normal(50,15,n)
110
- area = rng.normal(20,5,n)
111
- lung = rng.normal(100,30,n)
112
- spess = rng.uniform(0.5,5,n)
113
- usura = rng.uniform(0,1,n)
114
- rigid = rng.uniform(0,1,n)
115
- shape_c = rng.uniform(0,1,n)
116
- fori = rng.integers(0,10,n)
117
- estetica = rng.uniform(0,1,n)
118
- materiale = rng.integers(0,4,n)
119
-
120
- # Creiamo un "assegnamento" di destinazione fittizio
121
- # Esempio di logica: se spess<2 => preferiamo Dock; se volume>70 => preferiamo Armadio, etc.
122
- # + un tocco di casualità
123
- y = []
124
- for i in range(n):
125
- # base logic
126
- if volume[i]>70 and spess[i]<3:
127
- cl = "ArmadioAnta"
128
- elif shape_c[i]<0.3 and area[i]<25:
129
- cl = "DockRigenerato"
130
- elif estetica[i]>0.6:
131
- cl = "OggettoVetrina"
132
- else:
133
- cl = "ToolEducativo"
134
- # random dev
135
- if rng.random()<0.1:
136
- cl = rng.choice(DESTINAZIONI)
137
- y.append(cl)
138
-
139
- df = pd.DataFrame({
140
- "volume":volume,"area":area,"lunghezza":lung,"spessore":spess,"usura":usura,"rigidita":rigid,
141
- "shape_complex":shape_c,"fori_count":fori,"estetica":estetica,"materiale":materiale,
142
- "destinazione":y
143
- })
144
-
145
- st.dataframe(df.head(10))
146
- st.session_state["data_df"] = df
147
- st.success("Dataset generato e salvato in session.")
148
-
149
- # ---------------- TAB 2 ----------------
150
- with tabs[1]:
151
- st.subheader("Train & Validation")
152
-
153
- if "data_df" not in st.session_state:
154
- st.warning("Genera prima il dataset fittizio (Tab 1).")
155
- else:
156
- df = st.session_state["data_df"]
157
- st.write(f"Dataset size: {len(df)}")
158
-
159
- if st.button("Esegui training rete neurale"):
160
- # Prepariamo tensori
161
- X_np = df[["volume","area","lunghezza","spessore","usura","rigidita","shape_complex","fori_count","estetica","materiale"]].to_numpy()
162
- y_list = df["destinazione"].tolist()
163
- # Mappiamo destinazioni -> int
164
- label2id = {cls:i for i,cls in enumerate(DESTINAZIONI)}
165
- y_np = np.array([label2id[v] for v in y_list], dtype=np.int64)
166
-
167
- # Shuffle & split
168
- rng = np.random.default_rng(999)
169
- idx = np.arange(len(X_np))
170
- rng.shuffle(idx)
171
- train_size = int(len(X_np)*0.8)
172
- train_idx = idx[:train_size]
173
- val_idx = idx[train_size:]
174
-
175
- X_train = X_np[train_idx]
176
- y_train = y_np[train_idx]
177
- X_val = X_np[val_idx]
178
- y_val = y_np[val_idx]
179
-
180
- # PyTorch
181
- X_t = torch.tensor(X_train, dtype=torch.float32)
182
- y_t = torch.tensor(y_train, dtype=torch.long)
183
- train_dataset = TensorDataset(X_t,y_t)
184
-
185
- train_model(train_dataset, epochs=25, batch_size=64)
186
-
187
- # Salviamo X_val e y_val
188
- st.session_state["X_val"] = torch.tensor(X_val, dtype=torch.float32)
189
- st.session_state["y_val"] = torch.tensor(y_val, dtype=torch.long)
190
-
191
- st.success("Modello addestrato con successo!")
192
-
193
- # Se già trainato, valutiamo la performance su val set
194
- if st.session_state["trained"] and st.session_state["X_val"] is not None:
195
- Xv = st.session_state["X_val"]
196
- yv = st.session_state["y_val"]
197
- model = st.session_state["model"]
198
- with torch.no_grad():
199
- logits = model(Xv)
200
- preds = torch.argmax(logits, dim=1)
201
- acc = (preds==yv).float().mean().item()
202
- st.info(f"Accuracy su validation set: {acc:.2f}")
203
 
204
 
205
- # ---------------- TAB 3 ----------------
206
- with tabs[2]:
207
- st.subheader("Valuta un nuovo EoL e vedi bounding box design wise")
 
 
208
 
209
- if not st.session_state["trained"]:
210
- st.warning("Occorre prima addestrare la rete neurale in Tab 2.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  else:
212
- # Input EoL
213
- st.write("Inserisci 10 feature del componente EoL:")
214
- volume_in = st.number_input("Volume (cm³)", 0.0, 1000.0, 80.0)
215
- area_in = st.number_input("Area (cm²)", 0.0, 500.0, 30.0)
216
- lung_in = st.number_input("Lunghezza (mm)",0.0,2000.0,100.0)
217
- spess_in = st.number_input("Spessore (mm)",0.0,10.0,2.0)
218
- usura_in = st.slider("Usura",0.0,1.0,0.3)
219
- rigid_in = st.slider("Rigidità",0.0,1.0,0.5)
220
- shape_in = st.slider("Shape Complexity",0.0,1.0,0.4)
221
- fori_in = st.number_input("Numero fori",0,50,4)
222
- estet_in = st.slider("Estetica (0=brutto,1=figo)",0.0,1.0,0.6)
223
- mat_in = st.selectbox("Materiale code (0=Plastica,1=Metallo,2=PCB,3=Altro)",[0,1,2,3],index=1)
224
-
225
- if st.button("Calcola Probabilità Compatibilità"):
226
- x_in = np.array([[volume_in, area_in, lung_in, spess_in, usura_in, rigid_in, shape_in, fori_in, estet_in, mat_in]], dtype=np.float32)
227
- model = st.session_state["model"]
228
- xt = torch.tensor(x_in)
229
- with torch.no_grad():
230
- logits = model(xt)
231
- probs = F.softmax(logits, dim=1).numpy()[0]
232
- # Visual
233
- st.write("**Probabilità su 4 destinazioni:**")
234
- for i,cls in enumerate(DESTINAZIONI):
235
- st.write(f"- {cls}: {probs[i]*100:.1f}%")
236
- best_idx = np.argmax(probs)
237
- best_cls = DESTINAZIONI[best_idx]
238
- st.success(f"Più probabile destinazione: {best_cls}")
239
-
240
- # Un bounding box 3D (fittizio)
241
- # Esempio: disegniamo un parallelepipedo EoL in Plotly
242
- def create_box(L, A, S):
243
- # ipotesi: area = L x W => W= area / L
244
- # volume= L x W x H => H= volume/(L x W)
245
- # ma useremo spessore in parte
246
- # semplifichiamo: base lung_in x sqrt(area_in), altezza = spess_in * 10...
247
- import math
248
- W = max(1, A / max(1,L))
249
- H = max(1, S*15) # spessore * 15 come fattore
250
- # vertici
251
- return [
252
- (0,0,0),(L,0,0),(L,W,0),(0,W,0),
253
- (0,0,H),(L,0,H),(L,W,H),(0,W,H)
254
- ]
255
- box_verts = create_box(lung_in, area_in, spess_in)
256
- edges = [(0,1),(1,2),(2,3),(3,0),
257
- (4,5),(5,6),(6,7),(7,4),
258
- (0,4),(1,5),(2,6),(3,7)]
259
- fig = go.Figure()
260
- for (i,j) in edges:
261
- xs = [box_verts[i][0], box_verts[j][0]]
262
- ys = [box_verts[i][1], box_verts[j][1]]
263
- zs = [box_verts[i][2], box_verts[j][2]]
264
- fig.add_trace(go.Scatter3d(x=xs,y=ys,z=zs,mode='lines', line=dict(color='blue',width=5), showlegend=False))
265
- fig.update_layout(
266
- title="Bounding box EoL (fittizio design wise)",
267
- scene=dict(
268
- xaxis_title='X(mm)', yaxis_title='Y(mm)', zaxis_title='Z(mm)'
269
- ),
270
- width=600,height=500
271
- )
272
- st.plotly_chart(fig, use_container_width=True)
273
-
274
- # Testuale
275
- st.write(f"Suggerimento generico di assemblaggio per {best_cls}:")
276
-
277
- if best_cls=="ArmadioAnta":
278
- st.info("Potresti aggiungere cerniere su spessore <3mm, rifinitura per margini.")
279
- elif best_cls=="DockRigenerato":
280
- st.info("Prevedi alloggiamento con fori=2-6 per connettori, ottimo su plastica/metallo.")
281
- elif best_cls=="OggettoVetrina":
282
- st.info("Valorizza l'estetica >0.5, rifinitura superficiale e shape complesso ok.")
283
- else:
284
- st.info("Kit didattico, personalizzabile con fori e shape vario.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
285
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ import pandas as pd
3
+ import numpy as np
4
+ from PIL import Image
5
+ import os
6
+
7
+ # Scikit-learn
8
+ from sklearn.model_selection import train_test_split
9
+ from sklearn.preprocessing import LabelEncoder, StandardScaler
10
+ from sklearn.metrics import accuracy_score, confusion_matrix
11
+ from sklearn.pipeline import Pipeline
12
+ from sklearn.linear_model import LogisticRegression
13
+ from sklearn.ensemble import RandomForestClassifier
14
+ import matplotlib.pyplot as plt
15
+ import seaborn as sns
16
+
17
+ # PyTorch
18
  import torch
19
  import torch.nn as nn
20
  import torch.nn.functional as F
21
+ import random
22
+
23
+ ###########################################
24
+ # 1) Libreria Destinazioni (multi-class)
25
+ ###########################################
26
# Closed set of reuse destinations for the multi-class task.
TARGET_LABELS = ["Armadio", "Dock", "Vetrina", "Tool"]

def pick_random_target(row):
    """Map a dataset row to its destination label.

    Despite the name, the assignment is fully deterministic: the row's
    ``shape_code`` (expected 0..3) indexes directly into TARGET_LABELS
    (0 -> Armadio, 1 -> Dock, 2 -> Vetrina, 3 -> Tool). No randomness
    is applied.
    """
    shape_idx = int(row["shape_code"])
    return TARGET_LABELS[shape_idx]
39
+
40
+ ###########################################
41
+ # 2) Genera dataset sintetico
42
+ ###########################################
43
def generate_synthetic_data_mc(n=300, seed=42):
    """Build a synthetic multi-class dataset with n samples.

    Columns: volume, area, lunghezza, spessore (clipped normals),
    usura (uniform 0..1), shape_code (int 0..3), plus a "target" label
    derived from shape_code via pick_random_target.

    Both the NumPy and stdlib RNGs are seeded so repeated calls with the
    same arguments reproduce the same data.
    """
    np.random.seed(seed)
    random.seed(seed)

    # NOTE: dict literals evaluate left-to-right, so the np.random call
    # order (and hence the generated values) matches the original code.
    feature_columns = {
        "volume": np.clip(np.random.normal(50, 15, n), 10, 200),
        "area": np.clip(np.random.normal(20, 8, n), 5, 200),
        "lunghezza": np.clip(np.random.normal(100, 30, n), 20, 300),
        "spessore": np.clip(np.random.normal(5, 1, n), 0.5, 10),
        "usura": np.random.rand(n),            # wear level in 0..1
        "shape_code": np.random.randint(0, 4, n),  # one of 4 shape families
    }
    df = pd.DataFrame(feature_columns)

    # Label each row from its shape_code (see pick_random_target).
    df["target"] = df.apply(pick_random_target, axis=1)
    return df
71
+
72
+ ###########################################
73
+ # 3) Modello PyTorch VAE per generative reuse
74
+ ###########################################
75
class MiniVAE(nn.Module):
    """Tiny variational autoencoder over fixed-size feature vectors.

    Encoder: input_dim -> 16 -> (mu, logvar) in latent_dim dimensions.
    Decoder: latent_dim -> 16 -> input_dim reconstruction.
    """

    def __init__(self, input_dim=4, latent_dim=2):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, 16)
        self.fc21 = nn.Linear(16, latent_dim)  # posterior mean head
        self.fc22 = nn.Linear(16, latent_dim)  # posterior log-variance head
        self.fc3 = nn.Linear(latent_dim, 16)
        self.fc4 = nn.Linear(16, input_dim)

    def encode(self, x):
        """Return (mu, logvar) of the approximate posterior q(z|x)."""
        hidden_act = F.relu(self.fc1(x))
        mu = self.fc21(hidden_act)
        logvar = self.fc22(hidden_act)
        return mu, logvar

    def reparameterize(self, mu, logvar):
        """Sample z = mu + sigma * eps with eps ~ N(0, I) (reparameterization trick)."""
        sigma = torch.exp(0.5 * logvar)
        noise = torch.randn_like(sigma)
        return mu + noise * sigma

    def decode(self, z):
        """Map a latent code back to input space."""
        return self.fc4(F.relu(self.fc3(z)))

    def forward(self, x):
        """Run encode -> sample -> decode; return (recon, mu, logvar)."""
        mu, logvar = self.encode(x)
        latent = self.reparameterize(mu, logvar)
        return self.decode(latent), mu, logvar
102
+
103
def vae_loss(recon_x, x, mu, logvar):
    """Standard VAE objective: sum-reduced MSE reconstruction plus KL term.

    The KL term is the closed-form divergence between N(mu, sigma^2)
    and the standard normal prior N(0, I).
    """
    recon_term = F.mse_loss(recon_x, x, reduction='sum')
    kl_term = 0.5 * torch.sum(mu.pow(2) + logvar.exp() - 1 - logvar)
    return recon_term + kl_term
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
 
108
 
109
+ ###########################################
110
+ # STREAMLIT SETUP
111
+ ###########################################
112
st.set_page_config(page_title="WEEKO – Full AI App", layout="wide")
st.title("WEEKO – Full AI Demo: multi-class + generative + overlay")

# Seed st.session_state with every key the phases rely on, exactly once
# per browser session; reruns leave existing values untouched.
_SESSION_DEFAULTS = {
    "df": None,           # working DataFrame (synthetic or uploaded)
    "model": None,        # fitted sklearn pipeline
    "vae": None,          # MiniVAE instance
    "vae_trained": False, # whether the VAE has been trained this session
    "label_enc": None,    # LabelEncoder paired with the model
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
125
+
126
+
127
+ ###########################################
128
+ # SIDEBAR
129
+ ###########################################
130
# Phase selector shown in the sidebar; the chosen string drives the
# main dispatch at the bottom of the file.
menu = st.sidebar.radio("Fasi", ["Dataset","Training","Inferenza","Generative Reuse (VAE)","Overlay Estetico","Dashboard"])
131
+
132
+ ###########################################
133
+ # FUNZIONE: Caricamento/generazione dataset
134
+ ###########################################
135
def dataset_phase():
    """Phase 1 UI: obtain a dataset (synthetic or uploaded CSV) and preview it.

    The resulting DataFrame is stored in st.session_state["df"] for the
    later phases; a download button exports it back to CSV.
    """
    st.subheader("Fase 1: Dataset")

    source = st.radio("Scegli la fonte del dataset", ["Genera Sintetico","Carica CSV"], horizontal=True)
    if source == "Genera Sintetico":
        sample_count = st.slider("Numero di campioni", 50,1000,300,step=50)
        if st.button("Genera"):
            st.session_state["df"] = generate_synthetic_data_mc(n=sample_count)
            st.success("Dataset sintetico generato!")
    else:
        uploaded = st.file_uploader("Carica CSV con colonne: volume, area, lunghezza, spessore, usura, shape_code, target", type=["csv"])
        if uploaded:
            st.session_state["df"] = pd.read_csv(uploaded)
            st.success("Dataset caricato!")

    current = st.session_state["df"]
    if current is not None:
        st.write("Anteprima dataset:")
        st.dataframe(current.head(10))
        st.write("Distribuzione target:", current["target"].value_counts())
        st.download_button("Scarica Dataset", current.to_csv(index=False), "dataset.csv","text/csv")
158
+
159
+ ###########################################
160
+ # FUNZIONE: Training multi-class (es. RandomForest)
161
+ ###########################################
162
def training_phase():
    """Phase 2 UI: train a multi-class classifier on the session dataset.

    Validates the required columns, label-encodes the target, performs an
    80/20 train/test split, fits a StandardScaler + RandomForest pipeline,
    reports test accuracy and a confusion-matrix heatmap, and stores the
    fitted pipeline (``model``) and the LabelEncoder (``label_enc``) in
    st.session_state for the inference phase.
    """
    st.subheader("Fase 2: Training Modello Multi-Classe")
    df = st.session_state["df"]
    if df is None:
        st.error("Prima genera o carica il dataset!")
        return

    st.write("Esempio: usiamo un RandomForest + pipeline StandardScaler.")

    # Schema checks: the target column plus every feature column must exist.
    if "target" not in df.columns:
        st.error("Il dataset deve avere colonna 'target'!")
        return

    needed_cols = ["volume","area","lunghezza","spessore","usura","shape_code","target"]
    for c in needed_cols:
        if c not in df.columns:
            st.error(f"Colonna '{c}' assente nel dataset!")
            return

    X = df[["volume","area","lunghezza","spessore","usura","shape_code"]]
    y = df["target"].astype(str)

    # Encode string labels to integer ids; the encoder is kept in session
    # state so inference can inverse_transform predictions back to names.
    le = LabelEncoder()
    y_enc = le.fit_transform(y)
    st.session_state["label_enc"] = le

    # Hold out 20% of the data for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(X, y_enc, test_size=0.2, random_state=42)

    # Scaling is not needed by RandomForest itself, but keeping it in the
    # pipeline makes the classifier step swappable (e.g. LogisticRegression).
    pipe = Pipeline([
        ("scaler", StandardScaler()),
        ("clf", RandomForestClassifier(n_estimators=100, random_state=42))
    ])

    pipe.fit(X_train, y_train)
    pred = pipe.predict(X_test)

    acc = accuracy_score(y_test, pred)
    st.write(f"Accuracy su test: {acc:.3f}")

    cm = confusion_matrix(y_test, pred)
    fig, ax = plt.subplots()
    sns.heatmap(cm, annot=True, fmt='d', ax=ax)
    st.pyplot(fig)

    st.session_state["model"] = pipe
    st.success("Modello addestrato e salvato in session_state['model'].")
220
+
221
+ ###########################################
222
+ # FUNZIONE: Inference multi-class
223
+ ###########################################
224
def inference_phase():
    """Phase 3 UI: score a new end-of-life component with the trained model.

    Reads six features from the form, runs predict_proba through the
    pipeline stored in session state, and lists the destinations in
    descending probability order.
    """
    st.subheader("Fase 3: Inferenza su un nuovo EoL")
    if st.session_state["model"] is None:
        st.error("Devi prima addestrare il modello!")
        return
    model = st.session_state["model"]
    le = st.session_state["label_enc"]

    # Feature entry, laid out over two columns.
    col1, col2 = st.columns(2)
    with col1:
        vol = st.number_input("Volume (cm³)",0.0,1000.0,50.0,step=1.0)
        area= st.number_input("Area (cm²)",0.0,1000.0,30.0,step=1.0)
        lung= st.number_input("Lunghezza (mm)",0.0,2000.0,120.0,step=1.0)
    with col2:
        spess= st.number_input("Spessore (mm)",0.0,20.0,5.0,step=0.5)
        usura= st.slider("Usura (0=nuovo,1=usuratissimo)",0.0,1.0,0.3)
        shape_code = st.selectbox("Shape Code", [0,1,2,3], index=0)

    if st.button("Calcola Compatibilità"):
        features = np.array([[vol, area, lung, spess, usura, shape_code]], dtype=float)
        proba = model.predict_proba(features)[0]
        # Decode the encoded class ids (0..k-1) back to label names.
        class_names = le.inverse_transform(np.arange(len(proba)))
        # Rank destinations from most to least probable.
        ranking = np.argsort(-proba)
        st.write("**Compatibilità con i possibili target** (ordine decrescente):")
        for idx in ranking:
            st.write(f"- {class_names[idx]}: {proba[idx]*100:.1f}%")
        st.success(f"**Consiglio**: {class_names[ranking[0]]}")
255
+
256
+ ###########################################
257
+ # FUNZIONE: Mini VAE generative reuse
258
+ ###########################################
259
def generative_phase():
    """Phase 4 UI: train a MiniVAE on synthetic 4-feature data and sample from it.

    Uses st.session_state["vae"] / ["vae_trained"] to persist the model and
    its training status across Streamlit reruns.
    """
    st.subheader("Fase 4: Generative Reuse (VAE)")

    # Create / initialize the VAE once per session.
    if st.session_state["vae"] is None:
        st.session_state["vae"] = MiniVAE()
    vae = st.session_state["vae"]

    # If not trained yet, offer to train it on synthetic data.
    if not st.session_state["vae_trained"]:
        if st.button("Allena VAE su dataset fittizio di 4 feature"):
            # Generate a synthetic dataset: [dim1, dim2, spess, dec]
            rng = np.random.default_rng(123)
            n = 500
            dim1 = rng.normal(50,10,n)
            dim2 = rng.normal(20,5,n)
            spess = rng.normal(5,1,n)
            dec = rng.uniform(0,1,n)  # presumably a 0..1 "decoration" score
            arr = np.column_stack([dim1, dim2, spess, dec])
            X_t = torch.tensor(arr,dtype=torch.float32)

            optimizer = torch.optim.Adam(vae.parameters(), lr=1e-3)
            epochs=20
            batch_size=32
            dataset = torch.utils.data.TensorDataset(X_t)
            loader = torch.utils.data.DataLoader(dataset,batch_size=batch_size,shuffle=True)

            # Standard VAE training loop: reconstruction + KL loss per batch.
            for ep in range(epochs):
                total_loss=0
                for (batch,) in loader:
                    optimizer.zero_grad()
                    recon, mu, logvar = vae(batch)
                    loss=vae_loss(recon, batch, mu, logvar)
                    loss.backward()
                    optimizer.step()
                    total_loss += loss.item()  # accumulated but not displayed
            st.session_state["vae_trained"] = True
            st.success("VAE addestrato!")
    else:
        st.info("VAE già addestrato.")

    # Sample new candidate solutions from the trained decoder.
    if st.session_state["vae_trained"]:
        if st.button("Genera 5 soluzioni"):
            with torch.no_grad():
                # Draw 5 latent codes from the 2-dim standard-normal prior.
                z = torch.randn(5,2)
                recon = vae.decode(z)
                arr = recon.numpy()
                df = pd.DataFrame(arr, columns=["Dim1","Dim2","Spess","Decor"])
                st.dataframe(df.round(2))
                st.write("Interpretazione: Dim1, Dim2 = dimensioni, Spess=spessore, Decor=0..1 stima di ‘finitura artistica’?")
310
+
311
+ ###########################################
312
+ # FUNZIONE: Overlay Estetico
313
+ ###########################################
314
def overlay_phase():
    """Phase 5 UI: alpha-blend an EoL photo onto the final-object photo."""
    st.subheader("Fase 5: Overlay Estetico")

    left, right = st.columns(2)
    with left:
        file_eol = st.file_uploader("Foto EoL", type=["jpg","png"], key="uplEolOverlay")
    with right:
        file_obj = st.file_uploader("Foto Oggetto Finale", type=["jpg","png"], key="uplObjOverlay")

    alpha = st.slider("Trasparenza EoL", 0.0,1.0,0.5)

    # Guard clause: both photos are required before blending.
    if not (file_eol and file_obj):
        st.info("Carica entrambe le immagini per vedere l'overlay")
        return

    # Resize the EoL image to the target's size, then alpha-blend.
    eol_rgba = Image.open(file_eol).convert("RGBA")
    obj_rgba = Image.open(file_obj).convert("RGBA")
    eol_rgba = eol_rgba.resize(obj_rgba.size)
    composite = Image.blend(obj_rgba, eol_rgba, alpha)
    st.image(composite, caption="Overlay EoL + Oggetto Finale", use_column_width=True)
333
+
334
+ ###########################################
335
+ # FUNZIONE: Dashboard
336
+ ###########################################
337
def dashboard_phase():
    """Summary page: dataset statistics plus current model-training status."""
    st.subheader("Dashboard")
    if st.session_state["df"] is None:
        st.error("Nessun dataset caricato/generato.")
        return
    df = st.session_state["df"]
    st.write("**Info dataset**")
    st.write(df.describe())

    model_ready = st.session_state.get("model") is not None
    if model_ready:
        st.write("**Modello**: RandomForest + pipeline (scaler). Se addestrato con logistic regression, stesso pipeline logic.")
        # Metrics are not persisted in session state; see the Training phase.
        st.info("Per metrics dettagliate, vedi la fase Training.")
    else:
        st.warning("Modello non addestrato. Niente metriche da mostrare.")
352
+
353
+
354
+ ###########################################
355
+ # MAIN flow
356
+ ###########################################
357
# Route the sidebar selection to its phase handler via a dispatch table.
_PHASES = {
    "Dataset": dataset_phase,
    "Training": training_phase,
    "Inferenza": inference_phase,
    "Generative Reuse (VAE)": generative_phase,
    "Overlay Estetico": overlay_phase,
    "Dashboard": dashboard_phase,
}
_handler = _PHASES.get(menu)
if _handler is not None:
    _handler()