avo-milas committed on
Commit
bbb65c2
·
verified ·
1 Parent(s): 06f8e4c
Files changed (4) hide show
  1. src/app.py +138 -0
  2. src/embeddings.pkl +3 -0
  3. src/label_mappings.json +1 -0
  4. src/mlp_model.pth +3 -0
src/app.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import torch
3
+ import torch.nn as nn
4
+ import numpy as np
5
+ import pickle
6
+ import json
7
+ from PIL import Image
8
+
9
# Use the full browser width so the 4-column image grid has room.
st.set_page_config(layout="wide")

# Global CSS overrides: enlarge fonts across Streamlit's built-in widgets
# for readability (the .css-18e3th9 selector targets a generated Streamlit
# container class -- NOTE(review): such class names can change between
# Streamlit releases; verify on upgrade).
_CUSTOM_CSS = """
<style>
body, html, .css-18e3th9 {
font-size: 20px !important;
}
h1 {
font-size: 3rem !important;
}
h2 {
font-size: 2.5rem !important;
}
label, .stSlider label, .stSelectbox label, .stButton button, .stTextInput label {
font-size: 22px !important;
}
div[data-baseweb="select"] > div {
font-size: 20px !important;
}
.stNumberInput input {
font-size: 20px !important;
}
div[data-testid="stImage"] > figcaption {
font-size: 1.2rem !important;
}
</style>
"""
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)
36
+
37
+
38
def _read_pickle(path):
    """Deserialize a locally produced pickle artifact.

    NOTE(review): pickle.load executes arbitrary code on load; this is only
    acceptable because the file ships with the app -- never point it at
    untrusted input.
    """
    with open(path, 'rb') as fh:
        return pickle.load(fh)


def _read_json(path):
    """Parse a small JSON artifact into a Python dict."""
    with open(path, 'r') as fh:
        return json.load(fh)


# Precomputed per-image embedding vectors, keyed by image id
# (presumably the same ids as ../images/<id>.png -- TODO confirm).
embeddings = _read_pickle('embeddings.pkl')
# Categorical label -> integer index mappings fitted at training time.
label_mappings = _read_json('label_mappings.json')
43
+
44
+
45
class MLPRegressor(nn.Module):
    """Feed-forward regressor producing a single score bounded to [-1, 1].

    Architecture: (Linear -> ReLU -> Dropout) for each hidden width, then a
    final Linear(prev, 1) followed by Tanh to bound the output.
    """

    def __init__(self, input_dim, hidden_dims=(512, 128), dropout=0.2):
        """
        Args:
            input_dim: Size of the flattened input feature vector.
            hidden_dims: Width of each hidden layer, in order. Default is a
                tuple rather than a list -- the original ``[512, 128]`` was a
                mutable default argument (shared across calls), a classic
                Python pitfall; any iterable of ints still works.
            dropout: Dropout probability applied after every hidden ReLU.
        """
        super().__init__()
        layers = []
        prev = input_dim
        for h in hidden_dims:
            layers += [
                nn.Linear(prev, h),
                nn.ReLU(),
                nn.Dropout(dropout),
            ]
            prev = h
        layers.append(nn.Linear(prev, 1))
        layers.append(nn.Tanh())  # squash the regression output into [-1, 1]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Map a (batch, input_dim) tensor to a (batch, 1) score in [-1, 1]."""
        return self.net(x)
63
+
64
+
65
# Model input layout: three 384-dim image embeddings (concatenated in
# preference order), one numeric feature (age), and four integer-coded
# categorical features. Must match the layout used at training time.
_EMBEDDING_FEATURES = 3 * 384
_NUMERIC_FEATURES = 1
_CATEGORICAL_FEATURES = 4
input_dim = _EMBEDDING_FEATURES + _NUMERIC_FEATURES + _CATEGORICAL_FEATURES

# Restore the trained weights on CPU and switch to inference mode (disables
# dropout). NOTE(review): torch.load is pickle-based -- acceptable only
# because the checkpoint ships with the app.
model = MLPRegressor(input_dim=input_dim)
model.load_state_dict(torch.load('mlp_model.pth', map_location='cpu'))
model.eval()
73
+
74
+
75
st.title("Investor Risk Score Estimator")
st.markdown("---")
st.subheader("Select 3 images in order of your preference:")

# Render the 16 candidate images in a 4-column grid, each with a checkbox.
grid = st.columns(4)
checked = {}
for img_id in range(1, 17):
    cell = grid[(img_id - 1) % 4]
    cell.image(
        Image.open(f"../images/{img_id}.png"),
        caption=f"ID {img_id}",
        use_container_width=True,
    )
    checked[img_id] = cell.checkbox(f"Select ID {img_id}")

# Insertion order of `checked` preserves ascending image-id order.
selected = [img_id for img_id, chosen in checked.items() if chosen]

# Gate the rest of the page on exactly three selections (guard clauses).
if len(selected) < 3:
    st.info(f"Please select {3 - len(selected)} more image(s).")
    st.stop()
if len(selected) > 3:
    st.error("You have selected more than 3 images. Please select exactly 3.")
    st.stop()
st.success("✅ 3 images selected. You can proceed!")
100
+
101
# Let the user rank the three chosen images; the third rank is implied by
# the first two choices.
st.subheader("Set preference order for selected images:")
pref_1 = st.selectbox("Preference 1", selected, key="p1")
leftover = [img for img in selected if img != pref_1]
pref_2 = st.selectbox("Preference 2", leftover, key="p2")
pref_3 = next(img for img in leftover if img != pref_2)
st.write(f"Preference 3: {pref_3}")
107
+
108
st.header("Sociodemographic Information:")

# Age slider: allowed range 18-100, default 30.
age = st.slider("Age", 18, 100, 30)

# Categorical inputs are restricted to the label vocabulary seen at
# training time (iterating a dict yields its keys).
gender = st.selectbox("Gender", list(label_mappings['gender']))
education = st.selectbox("Education", list(label_mappings['education']))
income = st.selectbox("Income", list(label_mappings['income']))
marital_status = st.selectbox("Marital Status", list(label_mappings['marital_status']))
116
+
117
# Concatenate the three image embeddings in ranked preference order.
ranked_embeddings = np.hstack(
    [embeddings[p] for p in (pref_1, pref_2, pref_3)]
)

# One raw numeric feature (age) followed by the four categorical codes,
# integer-encoded via the training-time label mappings. The overall order
# (embeddings, numeric, categorical) must match the model's training layout.
numeric_part = np.array([age])
categorical_part = np.array([
    label_mappings['gender'][gender],
    label_mappings['education'][education],
    label_mappings['income'][income],
    label_mappings['marital_status'][marital_status]
])

# Assemble the full feature vector and add a leading batch dimension.
feature_vector = np.hstack(
    [ranked_embeddings, numeric_part, categorical_part]
).astype(np.float32)
final_tensor = torch.tensor(feature_vector).unsqueeze(0)
134
+
135
# Run inference only on demand; no_grad avoids building autograd state for
# a pure forward pass.
if st.button("Calculate Risk Score"):
    with torch.no_grad():
        score = model(final_tensor).item()
    st.success(f"Predicted Risk Score: {score:.3f}")
src/embeddings.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29cf048353616c559c496fb870ab8d8d298e46e45f1cf8c2a300af666cb31e42
3
+ size 26846
src/label_mappings.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"gender": {"female": 0, "male": 1, "other": 2}, "education": {"PhD": 0, "bachelor": 1, "high school": 2, "master": 3}, "income": {"high": 0, "low": 1, "medium": 2}, "marital_status": {"divorced": 0, "married": 1, "single": 2, "widowed": 3}}
src/mlp_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7cbca730a7ae9e0ef71bab0eb968efe2ca81eeebd8b8bfeb0d49c1a981226066
3
+ size 2637933