Update app.py
Browse files
app.py
CHANGED
|
@@ -102,7 +102,7 @@ st.markdown("""
|
|
| 102 |
|
| 103 |
/* Links */
|
| 104 |
a {
|
| 105 |
-
color: #
|
| 106 |
text-decoration: none !important;
|
| 107 |
}
|
| 108 |
a:hover {
|
|
@@ -112,7 +112,7 @@ st.markdown("""
|
|
| 112 |
|
| 113 |
/* Button styling */
|
| 114 |
.stButton > button {
|
| 115 |
-
background-color: #
|
| 116 |
color: #f8fafc;
|
| 117 |
border: none;
|
| 118 |
font-weight: 600;
|
|
@@ -134,7 +134,7 @@ st.markdown("""
|
|
| 134 |
|
| 135 |
/* Metric styling */
|
| 136 |
[data-testid="stMetricValue"] {
|
| 137 |
-
color: #
|
| 138 |
}
|
| 139 |
|
| 140 |
.main-header {
|
|
@@ -158,7 +158,7 @@ st.markdown("""
|
|
| 158 |
margin-bottom: 30px;
|
| 159 |
}
|
| 160 |
.nav-links a {
|
| 161 |
-
color: #
|
| 162 |
padding: 0 20px;
|
| 163 |
font-size: 14px;
|
| 164 |
}
|
|
@@ -189,9 +189,9 @@ st.markdown("""
|
|
| 189 |
}
|
| 190 |
|
| 191 |
.stTabs [data-baseweb="tab"][aria-selected="true"] {
|
| 192 |
-
background-color: #
|
| 193 |
color: #ffffff !important;
|
| 194 |
-
border: 1px solid #
|
| 195 |
}
|
| 196 |
|
| 197 |
/* Hide default highlight bar */
|
|
@@ -204,10 +204,1491 @@ st.markdown("""
|
|
| 204 |
background-color: transparent;
|
| 205 |
}
|
| 206 |
.stSlider [data-baseweb="slider"] > div > div {
|
| 207 |
-
background: #
|
| 208 |
}
|
| 209 |
.stSlider [data-baseweb="slider"] [role="slider"] {
|
| 210 |
-
background-color: #
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 211 |
border: 2px solid #020617;
|
| 212 |
}
|
| 213 |
|
|
|
|
| 102 |
|
| 103 |
/* Links */
|
| 104 |
a {
|
| 105 |
+
color: #005981 !important; /* Sky 500 */
|
| 106 |
text-decoration: none !important;
|
| 107 |
}
|
| 108 |
a:hover {
|
|
|
|
| 112 |
|
| 113 |
/* Button styling */
|
| 114 |
.stButton > button {
|
| 115 |
+
background-color: #005981;
|
| 116 |
color: #f8fafc;
|
| 117 |
border: none;
|
| 118 |
font-weight: 600;
|
|
|
|
| 134 |
|
| 135 |
/* Metric styling */
|
| 136 |
[data-testid="stMetricValue"] {
|
| 137 |
+
color: #005981;
|
| 138 |
}
|
| 139 |
|
| 140 |
.main-header {
|
|
|
|
| 158 |
margin-bottom: 30px;
|
| 159 |
}
|
| 160 |
.nav-links a {
|
| 161 |
+
color: #005981;
|
| 162 |
padding: 0 20px;
|
| 163 |
font-size: 14px;
|
| 164 |
}
|
|
|
|
| 189 |
}
|
| 190 |
|
| 191 |
.stTabs [data-baseweb="tab"][aria-selected="true"] {
|
| 192 |
+
background-color: #005981 !important; /* Select Color: Sky 500 */
|
| 193 |
color: #ffffff !important;
|
| 194 |
+
border: 1px solid #005981;
|
| 195 |
}
|
| 196 |
|
| 197 |
/* Hide default highlight bar */
|
|
|
|
| 204 |
background-color: transparent;
|
| 205 |
}
|
| 206 |
.stSlider [data-baseweb="slider"] > div > div {
|
| 207 |
+
background: #005981 !important;
|
| 208 |
}
|
| 209 |
.stSlider [data-baseweb="slider"] [role="slider"] {
|
| 210 |
+
background-color: #005981;
|
| 211 |
+
border: 2px solid #020617;
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
/* Info Box Styling */
|
| 215 |
+
.stAlert {
|
| 216 |
+
background-color: rgba(2, 132, 199, 0.05) !important;
|
| 217 |
+
border: 1px solid rgba(2, 132, 199, 0.1) !important;
|
| 218 |
+
color: #cbd5e1 !important;
|
| 219 |
+
}
|
| 220 |
+
</style>
|
| 221 |
+
""", unsafe_allow_html=True)
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
# ============================================================================
|
| 225 |
+
# Session State Initialization
|
| 226 |
+
# ============================================================================
|
| 227 |
+
|
| 228 |
+
if 'results' not in st.session_state:
|
| 229 |
+
st.session_state.results = None
|
| 230 |
+
if 'config' not in st.session_state:
|
| 231 |
+
st.session_state.config = {
|
| 232 |
+
'elastic_modulus_mode': 'multiplicative',
|
| 233 |
+
'elastic_modulus_sigma': 0.4,
|
| 234 |
+
'lambda_min': 0.35,
|
| 235 |
+
'lambda_max': 1.20,
|
| 236 |
+
'inference_steps': 8
|
| 237 |
+
}
|
| 238 |
+
if 'audit_log' not in st.session_state:
|
| 239 |
+
st.session_state.audit_log = deque(maxlen=100)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# ============================================================================
|
| 243 |
+
# Header with Branding
|
| 244 |
+
# ============================================================================
|
| 245 |
+
|
| 246 |
+
st.markdown("""
|
| 247 |
+
<div class='main-header'>
|
| 248 |
+
<h1 style='color: #f8fafc; margin: 0; font-size: 2.8rem;'>Deterministic Governance Mechanism</h1>
|
| 249 |
+
<p class='company-name'>Verhash LLC | Precision Substrate Computing</p>
|
| 250 |
+
</div>
|
| 251 |
+
<div class='nav-links'>
|
| 252 |
+
<a href='https://verhash.com' target='_blank'>🌐 verhash.com</a>
|
| 253 |
+
<a href='mailto:ryan@verhash.net'>📧 Contact</a>
|
| 254 |
+
<a href='https://github.com/yourusername/verhash' target='_blank'>💻 GitHub</a>
|
| 255 |
+
<a href='https://verhash.com/docs' target='_blank'>📚 Documentation</a>
|
| 256 |
+
</div>
|
| 257 |
+
""", unsafe_allow_html=True)
|
| 258 |
+
|
| 259 |
+
st.markdown("---")
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
# ============================================================================
|
| 263 |
+
# Sidebar: Configuration Panel
|
| 264 |
+
# ============================================================================
|
| 265 |
+
|
| 266 |
+
st.sidebar.header("Configuration")
|
| 267 |
+
|
| 268 |
+
# Elastic Modulus Mode
|
| 269 |
+
mode = st.sidebar.selectbox(
|
| 270 |
+
"Elastic Modulus Mode",
|
| 271 |
+
options=['cosine', 'multiplicative', 'rbf'],
|
| 272 |
+
index=1, # Default: multiplicative
|
| 273 |
+
help="Cosine: direction only | Multiplicative: angle×proximity | RBF: proximity only"
|
| 274 |
+
)
|
| 275 |
+
|
| 276 |
+
# Sigma parameter
|
| 277 |
+
sigma = st.sidebar.slider(
|
| 278 |
+
"Sigma (σ) - Field Extent",
|
| 279 |
+
min_value=0.2,
|
| 280 |
+
max_value=1.0,
|
| 281 |
+
value=0.4,
|
| 282 |
+
step=0.05,
|
| 283 |
+
help="Lower = tighter binding, higher = looser binding"
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
st.sidebar.markdown(f"**Current: σ={sigma:.2f}**")
|
| 287 |
+
|
| 288 |
+
# Lambda max
|
| 289 |
+
lambda_max = st.sidebar.slider(
|
| 290 |
+
"Lambda Max (lambda_max) - Max Pressure",
|
| 291 |
+
min_value=0.5,
|
| 292 |
+
max_value=2.0,
|
| 293 |
+
value=1.2,
|
| 294 |
+
step=0.1,
|
| 295 |
+
help="Higher = stricter exclusion"
|
| 296 |
+
)
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
# Run button
|
| 300 |
+
if st.sidebar.button("Run Deterministic Exclusion Demo", type="primary"):
|
| 301 |
+
with st.spinner("Running inference..."):
|
| 302 |
+
# Update config
|
| 303 |
+
st.session_state.config['elastic_modulus_mode'] = mode
|
| 304 |
+
st.session_state.config['elastic_modulus_sigma'] = sigma
|
| 305 |
+
st.session_state.config['lambda_max'] = lambda_max
|
| 306 |
+
|
| 307 |
+
# Run test
|
| 308 |
+
results = run_deterministic_exclusion_demo(
|
| 309 |
+
elastic_modulus_mode=mode,
|
| 310 |
+
sigma=sigma,
|
| 311 |
+
print_banner=False,
|
| 312 |
+
)
|
| 313 |
+
|
| 314 |
+
st.session_state.results = results
|
| 315 |
+
|
| 316 |
+
# Log event
|
| 317 |
+
st.session_state.audit_log.append({
|
| 318 |
+
'timestamp': time.time(),
|
| 319 |
+
'operation': 'run_inference',
|
| 320 |
+
'mode': mode,
|
| 321 |
+
'sigma': sigma,
|
| 322 |
+
'hash': results['hash']
|
| 323 |
+
})
|
| 324 |
+
|
| 325 |
+
st.success("Inference complete.")
|
| 326 |
+
|
| 327 |
+
st.sidebar.markdown("---")
|
| 328 |
+
st.sidebar.markdown("### 📞 Contact")
|
| 329 |
+
st.sidebar.caption("""
|
| 330 |
+
**Verhash LLC** | [verhash.com](https://verhash.com)
|
| 331 |
+
📧 [ryan@verhash.net](mailto:ryan@verhash.net)
|
| 332 |
+
*Patent Pending*
|
| 333 |
+
""")
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
# ============================================================================
|
| 337 |
+
# Main Content Area
|
| 338 |
+
# ============================================================================
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
st.success("✨ **Live Demo** - Testing deterministic AI governance on real LLMs")
|
| 342 |
+
st.markdown("---")
|
| 343 |
+
|
| 344 |
+
# Create tabs
|
| 345 |
+
tab1, tab2, tab3, tab4 = st.tabs(["Mechanism Demo", "LLM Guardrail", "Live LLM Testing", "Explain & Tune"])
|
| 346 |
+
|
| 347 |
+
# ----------------------------------------------------------------------------
|
| 348 |
+
# TAB 1: Mechanism Demo (Original)
|
| 349 |
+
# --------------------------------------------------------------------------=--
|
| 350 |
+
with tab1:
|
| 351 |
+
if st.session_state.results is None:
|
| 352 |
+
st.info("Configure parameters in the sidebar and click 'Run Deterministic Exclusion Demo'.")
|
| 353 |
+
else:
|
| 354 |
+
results = st.session_state.results
|
| 355 |
+
|
| 356 |
+
# Row 1: Deterministic Audit Trail
|
| 357 |
+
st.header("Deterministic Audit Trail")
|
| 358 |
+
col1, col2 = st.columns([2, 1])
|
| 359 |
+
with col1:
|
| 360 |
+
st.subheader("SHA-256")
|
| 361 |
+
st.markdown(f'<div class="hash-display">{results["hash"]}</div>', unsafe_allow_html=True)
|
| 362 |
+
with col2:
|
| 363 |
+
st.metric("Total Excluded", results["excluded"])
|
| 364 |
+
|
| 365 |
+
# Row 2: Abstention Indicator
|
| 366 |
+
st.header("Outcome Verification")
|
| 367 |
+
if results.get("winner_index") is None:
|
| 368 |
+
st.error("Abstained")
|
| 369 |
+
else:
|
| 370 |
+
st.success(f"Winner index: {results['winner_index']}")
|
| 371 |
+
st.caption("Expected winner: Candidate 0 | Expected excluded: 3")
|
| 372 |
+
|
| 373 |
+
# Row 3: Phase Log
|
| 374 |
+
st.header("Phase Log")
|
| 375 |
+
phase_log = results['phase_log']
|
| 376 |
+
import pandas as pd
|
| 377 |
+
df = pd.DataFrame(phase_log)
|
| 378 |
+
st.dataframe(df[['step', 'phase', 'pressure', 'survivors', 'excluded']], use_container_width=True, hide_index=True)
|
| 379 |
+
|
| 380 |
+
# Visualization
|
| 381 |
+
fig = go.Figure()
|
| 382 |
+
fig.add_trace(go.Scatter(
|
| 383 |
+
x=[e['step'] for e in phase_log],
|
| 384 |
+
y=[e['pressure'] for e in phase_log],
|
| 385 |
+
mode='lines+markers',
|
| 386 |
+
name='Pressure lambda(t)',
|
| 387 |
+
line=dict(color='cyan', width=3)
|
| 388 |
+
))
|
| 389 |
+
st.plotly_chart(fig, use_container_width=True)
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
# Row 5: Audit Log
|
| 393 |
+
st.header("Run Log")
|
| 394 |
+
if st.session_state.audit_log:
|
| 395 |
+
log_df = pd.DataFrame(st.session_state.audit_log)
|
| 396 |
+
log_df['timestamp'] = pd.to_datetime(log_df['timestamp'], unit='s')
|
| 397 |
+
st.dataframe(log_df[['timestamp', 'operation', 'mode', 'sigma', 'hash']], use_container_width=True, hide_index=True)
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
# ----------------------------------------------------------------------------
|
| 401 |
+
# TAB 2: LLM Guardrail Playground (New)
|
| 402 |
+
# ----------------------------------------------------------------------------
|
| 403 |
+
with tab2:
|
| 404 |
+
st.header("Deterministic LLM Filter")
|
| 405 |
+
st.markdown("""
|
| 406 |
+
A model-agnostic post-processor that evaluates candidate outputs against a verified substrate.
|
| 407 |
+
The mechanism deterministically accepts, rejects, or abstains based on explicit constraints.
|
| 408 |
+
""")
|
| 409 |
+
|
| 410 |
+
col_input1, col_input2 = st.columns(2)
|
| 411 |
+
|
| 412 |
+
with col_input1:
|
| 413 |
+
st.subheader("1. Verified Substrate")
|
| 414 |
+
st.markdown("Approved facts (Ground Truth). One per line.")
|
| 415 |
+
substrate_input = st.text_area(
|
| 416 |
+
"Substrate",
|
| 417 |
+
value="The sky is blue\nWater is wet\nParis is capital of France",
|
| 418 |
+
height=200,
|
| 419 |
+
key="llm_substrate"
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
with col_input2:
|
| 423 |
+
st.subheader("2. LLM Candidates")
|
| 424 |
+
st.markdown("Generated responses (including hallucinations). One per line.")
|
| 425 |
+
candidates_input = st.text_area(
|
| 426 |
+
"Candidates",
|
| 427 |
+
value="The sky is blue\nThe sky is green\nThe sky is made of cheese",
|
| 428 |
+
height=200,
|
| 429 |
+
key="llm_candidates"
|
| 430 |
+
)
|
| 431 |
+
|
| 432 |
+
# Add full trace checkbox
|
| 433 |
+
show_full_trace = st.checkbox(
|
| 434 |
+
"Show full stress evolution (all steps for all candidates)",
|
| 435 |
+
value=False,
|
| 436 |
+
help="Disable for faster UI with large candidate sets"
|
| 437 |
+
)
|
| 438 |
+
|
| 439 |
+
if st.button("Run Guardrail Filter", type="primary"):
|
| 440 |
+
from llm_adapter import DeterministicGuardrail, DeterministicHashEmbedderND
|
| 441 |
+
import math
|
| 442 |
+
|
| 443 |
+
# Parse inputs
|
| 444 |
+
substrate_list = [line.strip() for line in substrate_input.split('\n') if line.strip()]
|
| 445 |
+
candidate_list = [line.strip() for line in candidates_input.split('\n') if line.strip()]
|
| 446 |
+
|
| 447 |
+
if not substrate_list or not candidate_list:
|
| 448 |
+
st.error("Please provide both substrate and candidates.")
|
| 449 |
+
else:
|
| 450 |
+
with st.spinner("Projecting to material field..."):
|
| 451 |
+
# Initialize Guardrail
|
| 452 |
+
# Use current sidebar config instead of hardcoded preset
|
| 453 |
+
guard = DeterministicGuardrail(
|
| 454 |
+
substrate_texts=substrate_list,
|
| 455 |
+
config_preset='balanced' # Starting point
|
| 456 |
+
)
|
| 457 |
+
# Overwrite with current sidebar settings for consistency
|
| 458 |
+
guard.config['elastic_modulus_mode'] = st.session_state.config['elastic_modulus_mode']
|
| 459 |
+
guard.config['elastic_modulus_sigma'] = st.session_state.config['elastic_modulus_sigma']
|
| 460 |
+
guard.config['lambda_max'] = st.session_state.config['lambda_max']
|
| 461 |
+
|
| 462 |
+
# We want to INSPECT, not just filter
|
| 463 |
+
inspection = guard.inspect(candidate_list)
|
| 464 |
+
|
| 465 |
+
result_text = inspection['selected_text']
|
| 466 |
+
metrics = inspection['metrics']
|
| 467 |
+
candidate_metrics = metrics.get('candidates')
|
| 468 |
+
if candidate_metrics is None:
|
| 469 |
+
candidate_metrics = [
|
| 470 |
+
{
|
| 471 |
+
'phase_log': [],
|
| 472 |
+
'fractured': False,
|
| 473 |
+
'fractured_step': None,
|
| 474 |
+
'stress': 0.0,
|
| 475 |
+
'hash': 'N/A',
|
| 476 |
+
}
|
| 477 |
+
for _ in candidate_list
|
| 478 |
+
]
|
| 479 |
+
|
| 480 |
+
# Build detailed numbers view (high-D physics + 2D projection for plot)
|
| 481 |
+
with st.spinner("Computing embeddings..."):
|
| 482 |
+
# Use specialized caches for different dimensionality needs
|
| 483 |
+
sub_vecs = compute_substrate_embeddings_highd(substrate_list)
|
| 484 |
+
sub_vecs_2d = compute_substrate_embeddings_2d(substrate_list)
|
| 485 |
+
|
| 486 |
+
from llm_adapter import DeterministicHashEmbedderND
|
| 487 |
+
embedder_highd = DeterministicHashEmbedderND(dim=16)
|
| 488 |
+
cand_vecs = [embedder_highd.embed(t) for t in candidate_list]
|
| 489 |
+
|
| 490 |
+
# Use 2D for visualization
|
| 491 |
+
embedder_2d = DeterministicHashEmbedderND(dim=2)
|
| 492 |
+
cand_vecs_2d = [embedder_2d.embed(t) for t in candidate_list]
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
def cosine_similarity(v1, v2):
|
| 497 |
+
"""Cosine similarity between N-D vectors."""
|
| 498 |
+
dot = sum(a * b for a, b in zip(v1, v2))
|
| 499 |
+
mag1 = math.sqrt(sum(a * a for a in v1))
|
| 500 |
+
mag2 = math.sqrt(sum(b * b for b in v2))
|
| 501 |
+
if mag1 == 0 or mag2 == 0:
|
| 502 |
+
return 0.0
|
| 503 |
+
return dot / (mag1 * mag2)
|
| 504 |
+
|
| 505 |
+
def euclidean_distance(v1, v2):
|
| 506 |
+
"""Euclidean distance between N-D vectors."""
|
| 507 |
+
return math.sqrt(sum((a - b) ** 2 for a, b in zip(v1, v2)))
|
| 508 |
+
|
| 509 |
+
numbers_view = {
|
| 510 |
+
"embedder": {
|
| 511 |
+
"name": "DeterministicHashEmbedderND",
|
| 512 |
+
"definition": "sha256(text) -> 16D in [0,1], projected to 2D for plotting",
|
| 513 |
+
},
|
| 514 |
+
"substrate": [
|
| 515 |
+
{"text": t, "vec2": [round(v[0], 8), round(v[1], 8)]}
|
| 516 |
+
for t, v in zip(substrate_list, sub_vecs_2d)
|
| 517 |
+
],
|
| 518 |
+
"candidates": []
|
| 519 |
+
}
|
| 520 |
+
|
| 521 |
+
# For each candidate, compute detailed metrics
|
| 522 |
+
for i, (cand_vec, cand_text) in enumerate(zip(cand_vecs, candidate_list)):
|
| 523 |
+
# Compute alignment and distance to each substrate point
|
| 524 |
+
per_substrate = []
|
| 525 |
+
best_alignment = -1
|
| 526 |
+
best_distance = float('inf')
|
| 527 |
+
best_j = None
|
| 528 |
+
|
| 529 |
+
for j, sub_vec in enumerate(sub_vecs):
|
| 530 |
+
cos_sim = cosine_similarity(cand_vec, sub_vec)
|
| 531 |
+
alignment = (cos_sim + 1.0) / 2.0 # Normalize to [0,1]
|
| 532 |
+
dist = euclidean_distance(cand_vec, sub_vec)
|
| 533 |
+
|
| 534 |
+
per_substrate.append({
|
| 535 |
+
"substrate_index": j,
|
| 536 |
+
"substrate_text": substrate_list[j],
|
| 537 |
+
"cosine_similarity": round(cos_sim, 8),
|
| 538 |
+
"alignment_0_1": round(alignment, 8),
|
| 539 |
+
"euclidean_distance": round(dist, 8),
|
| 540 |
+
})
|
| 541 |
+
|
| 542 |
+
# Selection rule: highest alignment
|
| 543 |
+
if alignment > best_alignment:
|
| 544 |
+
best_alignment = alignment
|
| 545 |
+
best_distance = dist
|
| 546 |
+
best_j = j
|
| 547 |
+
|
| 548 |
+
# Get engine results for this candidate
|
| 549 |
+
cand_metrics = candidate_metrics[i]
|
| 550 |
+
phase_log = cand_metrics.get('phase_log', [])
|
| 551 |
+
|
| 552 |
+
# Build stress evolution - full or abbreviated
|
| 553 |
+
if show_full_trace:
|
| 554 |
+
stress_evolution = [
|
| 555 |
+
{
|
| 556 |
+
"step": entry["step"],
|
| 557 |
+
"phase": entry["phase"],
|
| 558 |
+
"lambda": round(entry["pressure"], 8),
|
| 559 |
+
"elastic_modulus_E": round(entry.get("elastic_modulus", 0.0), 8),
|
| 560 |
+
"delta_stress": round(entry.get("delta_stress", 0.0), 8),
|
| 561 |
+
"cumulative_stress": round(entry["stress"], 8),
|
| 562 |
+
"fractured": entry["fractured"]
|
| 563 |
+
}
|
| 564 |
+
for entry in phase_log
|
| 565 |
+
]
|
| 566 |
+
else:
|
| 567 |
+
# Abbreviated: first 2 steps, fracture step (if any), last step
|
| 568 |
+
abbreviated = []
|
| 569 |
+
|
| 570 |
+
if len(phase_log) > 0:
|
| 571 |
+
abbreviated.append(phase_log[0])
|
| 572 |
+
if len(phase_log) > 1:
|
| 573 |
+
abbreviated.append(phase_log[1])
|
| 574 |
+
|
| 575 |
+
fracture_step = cand_metrics.get('fractured_step')
|
| 576 |
+
if fracture_step is not None and fracture_step > 1:
|
| 577 |
+
abbreviated.append(phase_log[fracture_step])
|
| 578 |
+
elif len(phase_log) > 2:
|
| 579 |
+
abbreviated.append(phase_log[-1])
|
| 580 |
+
|
| 581 |
+
stress_evolution = [
|
| 582 |
+
{
|
| 583 |
+
"step": entry["step"],
|
| 584 |
+
"phase": entry["phase"],
|
| 585 |
+
"lambda": round(entry["pressure"], 8),
|
| 586 |
+
"elastic_modulus_E": round(entry.get("elastic_modulus", 0.0), 8),
|
| 587 |
+
"delta_stress": round(entry.get("delta_stress", 0.0), 8),
|
| 588 |
+
"cumulative_stress": round(entry["stress"], 8),
|
| 589 |
+
"fractured": entry["fractured"]
|
| 590 |
+
}
|
| 591 |
+
for entry in abbreviated
|
| 592 |
+
]
|
| 593 |
+
|
| 594 |
+
if len(phase_log) > len(abbreviated):
|
| 595 |
+
stress_evolution.append({
|
| 596 |
+
"note": f"...{len(phase_log) - len(abbreviated)} intermediate steps omitted (enable 'Show full stress evolution' to see all)"
|
| 597 |
+
})
|
| 598 |
+
|
| 599 |
+
numbers_view["candidates"].append({
|
| 600 |
+
"candidate_index": i,
|
| 601 |
+
"text": cand_text,
|
| 602 |
+
"vec2": [round(cand_vecs_2d[i][0], 8), round(cand_vecs_2d[i][1], 8)],
|
| 603 |
+
"comparisons": per_substrate,
|
| 604 |
+
"selection_rule": "highest_alignment",
|
| 605 |
+
"selected_by_alignment": {
|
| 606 |
+
"substrate_index": best_j,
|
| 607 |
+
"substrate_text": substrate_list[best_j] if best_j is not None else None,
|
| 608 |
+
"alignment_0_1": round(best_alignment, 8),
|
| 609 |
+
"euclidean_distance": round(float(best_distance), 8),
|
| 610 |
+
},
|
| 611 |
+
"engine": {
|
| 612 |
+
"fractured": cand_metrics.get('fractured', False),
|
| 613 |
+
"fractured_step": cand_metrics.get('fractured_step'),
|
| 614 |
+
"final_stress": round(float(cand_metrics['stress']), 8),
|
| 615 |
+
"hash": cand_metrics.get('hash', 'N/A'),
|
| 616 |
+
"stress_evolution": stress_evolution
|
| 617 |
+
}
|
| 618 |
+
})
|
| 619 |
+
|
| 620 |
+
# Display Result
|
| 621 |
+
st.markdown("### Result")
|
| 622 |
+
if result_text:
|
| 623 |
+
st.success(f"**Selected:** {result_text}")
|
| 624 |
+
else:
|
| 625 |
+
st.warning("**Abstained**: No candidates met the yield strength requirements.")
|
| 626 |
+
|
| 627 |
+
# Visualization of the Field
|
| 628 |
+
st.markdown("### Material Field Projection")
|
| 629 |
+
|
| 630 |
+
# Plot
|
| 631 |
+
fig_map = go.Figure()
|
| 632 |
+
|
| 633 |
+
# Plot Substrate (Green Squares)
|
| 634 |
+
fig_map.add_trace(go.Scatter(
|
| 635 |
+
x=[v[0] for v in sub_vecs_2d],
|
| 636 |
+
y=[v[1] for v in sub_vecs_2d],
|
| 637 |
+
mode='markers',
|
| 638 |
+
name='Substrate (Facts)',
|
| 639 |
+
text=substrate_list,
|
| 640 |
+
marker=dict(symbol='square', size=12, color='green')
|
| 641 |
+
))
|
| 642 |
+
|
| 643 |
+
# Plot Candidates (Red Circles)
|
| 644 |
+
# Differentiate selected vs excluded
|
| 645 |
+
selected_idx = metrics['final_output'].candidate_index if metrics['final_output'] else -1
|
| 646 |
+
|
| 647 |
+
colors = ['gold' if i == selected_idx else 'red' for i in range(len(cand_vecs))]
|
| 648 |
+
sizes = [15 if i == selected_idx else 10 for i in range(len(cand_vecs))]
|
| 649 |
+
|
| 650 |
+
fig_map.add_trace(go.Scatter(
|
| 651 |
+
x=[v[0] for v in cand_vecs_2d],
|
| 652 |
+
y=[v[1] for v in cand_vecs_2d],
|
| 653 |
+
mode='markers+text',
|
| 654 |
+
name='Candidates',
|
| 655 |
+
text=candidate_list,
|
| 656 |
+
textposition='top center',
|
| 657 |
+
marker=dict(symbol='circle', size=sizes, color=colors)
|
| 658 |
+
))
|
| 659 |
+
|
| 660 |
+
fig_map.update_layout(
|
| 661 |
+
title="Semantic Material Field (2D Mock Projection)",
|
| 662 |
+
xaxis_title="Dimension X",
|
| 663 |
+
yaxis_title="Dimension Y",
|
| 664 |
+
xaxis=dict(range=[0, 1]),
|
| 665 |
+
yaxis=dict(range=[0, 1]),
|
| 666 |
+
template="plotly_dark",
|
| 667 |
+
height=500
|
| 668 |
+
)
|
| 669 |
+
|
| 670 |
+
st.plotly_chart(fig_map, use_container_width=True)
|
| 671 |
+
|
| 672 |
+
# Metrics JSON
|
| 673 |
+
st.markdown("### Metrics")
|
| 674 |
+
metrics_json = {
|
| 675 |
+
"survived": result_text is not None,
|
| 676 |
+
"total_excluded": metrics['total_excluded'],
|
| 677 |
+
"falsification_pressure": f"{metrics['phase_log'][-1]['pressure']:.2f} lambda"
|
| 678 |
+
}
|
| 679 |
+
st.json(metrics_json)
|
| 680 |
+
|
| 681 |
+
# Complete Numerical Audit Trail
|
| 682 |
+
st.markdown("### Complete Numerical Audit Trail")
|
| 683 |
+
st.caption("Vectors, distances, selection rule, engine hash, stress evolution")
|
| 684 |
+
st.json(numbers_view)
|
| 685 |
+
|
| 686 |
+
# ----------------------------------------------------------------------------
|
| 687 |
+
# TAB 3: LLM Testing (HF Spaces Enhanced)
|
| 688 |
+
# ----------------------------------------------------------------------------
|
| 689 |
+
def call_huggingface_inference(model, prompt, api_key, temperature=0.7, max_tokens=256):
    """Call the HuggingFace Inference API directly and return the generated text.

    Args:
        model: Model repo id, e.g. "meta-llama/Llama-3.2-3B-Instruct".
        prompt: Prompt text sent as the "inputs" field of the payload.
        api_key: HF token, sent as a Bearer credential.
        temperature: Sampling temperature forwarded to the model.
        max_tokens: Maximum number of new tokens to generate.

    Returns:
        The generated text only (the prompt is excluded because
        return_full_text=False is requested).

    Raises:
        requests.HTTPError: On non-2xx HTTP responses.
        RuntimeError: If the API answers 200 but with an error payload
            (e.g. "Model ... is currently loading").
    """
    import requests  # local import keeps the GUI importable without requests

    api_url = f"https://api-inference.huggingface.co/models/{model}"
    headers = {"Authorization": f"Bearer {api_key}"}

    payload = {
        "inputs": prompt,
        "parameters": {
            "temperature": temperature,
            "max_new_tokens": max_tokens,
            "return_full_text": False
        }
    }

    response = requests.post(api_url, headers=headers, json=payload, timeout=30)
    response.raise_for_status()

    result = response.json()
    # Bug fix: the HF API can return HTTP 200 with {"error": ...} (model
    # loading / rate limiting). Previously this fell through to
    # result.get("generated_text", "") and silently returned "".
    if isinstance(result, dict) and "error" in result:
        raise RuntimeError(f"HuggingFace Inference API error: {result['error']}")
    if isinstance(result, list) and len(result) > 0:
        return result[0].get("generated_text", "")
    return result.get("generated_text", "")
|
| 712 |
+
|
| 713 |
+
# ----------------------------------------------------------------------------
|
| 714 |
+
# TAB 3: LLM Testing (HF Spaces Enhanced)
|
| 715 |
+
# ----------------------------------------------------------------------------
|
| 716 |
+
with tab3:
|
| 717 |
+
st.header("LLM Testing")
|
| 718 |
+
st.markdown("""
|
| 719 |
+
**Live API Testing** - Test any LLM with the Deterministic Guardrail in real-time.
|
| 720 |
+
|
| 721 |
+
Supports: OpenAI, Anthropic, Google Gemini, local models (Ollama, llama.cpp, vLLM), and any OpenAI-compatible API.
|
| 722 |
+
""")
|
| 723 |
+
|
| 724 |
+
# HF Spaces detection
|
| 725 |
+
IS_SPACES = os.getenv("SPACE_ID") is not None
|
| 726 |
+
|
| 727 |
+
# Show banner if in Spaces
|
| 728 |
+
if IS_SPACES:
|
| 729 |
+
st.info("🚀 **Running on Hugging Face Spaces** - Configure API keys in Settings → Secrets (for admins) or enter below")
|
| 730 |
+
|
| 731 |
+
# API Configuration
|
| 732 |
+
st.subheader("1. LLM API Configuration")
|
| 733 |
+
|
| 734 |
+
col_api1, col_api2 = st.columns(2)
|
| 735 |
+
|
| 736 |
+
with col_api1:
|
| 737 |
+
# Add Hugging Face Inference API option for Spaces
|
| 738 |
+
provider_options = [
|
| 739 |
+
"Hugging Face Inference API (Free)", # NEW - great for Spaces demos
|
| 740 |
+
"OpenAI",
|
| 741 |
+
"Anthropic (Claude)",
|
| 742 |
+
"Google (Gemini)",
|
| 743 |
+
"Local (Ollama)",
|
| 744 |
+
"Local (llama.cpp)",
|
| 745 |
+
"Custom OpenAI-compatible"
|
| 746 |
+
]
|
| 747 |
+
|
| 748 |
+
# Default to HF Inference if in Spaces
|
| 749 |
+
default_index = 0 if IS_SPACES else 1
|
| 750 |
+
|
| 751 |
+
api_preset = st.selectbox(
|
| 752 |
+
"Provider Preset",
|
| 753 |
+
options=provider_options,
|
| 754 |
+
index=default_index,
|
| 755 |
+
help="Select a preset or use custom for any OpenAI-compatible endpoint"
|
| 756 |
+
)
|
| 757 |
+
|
| 758 |
+
# Set defaults based on preset
|
| 759 |
+
if api_preset == "Hugging Face Inference API (Free)":
|
| 760 |
+
default_base_url = "https://api-inference.huggingface.co/models"
|
| 761 |
+
default_model = "meta-llama/Llama-3.2-3B-Instruct" # Free tier model
|
| 762 |
+
needs_key = True
|
| 763 |
+
api_type = "huggingface"
|
| 764 |
+
elif api_preset == "OpenAI":
|
| 765 |
+
default_base_url = "https://api.openai.com/v1"
|
| 766 |
+
default_model = "gpt-4o-mini" # Updated to current model
|
| 767 |
+
needs_key = True
|
| 768 |
+
api_type = "openai"
|
| 769 |
+
elif api_preset == "Anthropic (Claude)":
|
| 770 |
+
default_base_url = "https://api.anthropic.com/v1"
|
| 771 |
+
default_model = "claude-3-5-sonnet-20241022"
|
| 772 |
+
needs_key = True
|
| 773 |
+
api_type = "anthropic"
|
| 774 |
+
elif api_preset == "Google (Gemini)":
|
| 775 |
+
default_base_url = "https://generativelanguage.googleapis.com/v1beta"
|
| 776 |
+
default_model = "gemini-2.0-flash-exp"
|
| 777 |
+
needs_key = True
|
| 778 |
+
api_type = "google"
|
| 779 |
+
elif api_preset == "Local (Ollama)":
|
| 780 |
+
default_base_url = "http://localhost:11434/v1"
|
| 781 |
+
default_model = "llama3.1"
|
| 782 |
+
needs_key = False
|
| 783 |
+
api_type = "openai"
|
| 784 |
+
elif api_preset == "Local (llama.cpp)":
|
| 785 |
+
default_base_url = "http://localhost:8080/v1"
|
| 786 |
+
default_model = "local-model"
|
| 787 |
+
needs_key = False
|
| 788 |
+
api_type = "openai"
|
| 789 |
+
else:
|
| 790 |
+
default_base_url = "https://api.openai.com/v1"
|
| 791 |
+
default_model = "gpt-4o-mini"
|
| 792 |
+
needs_key = True
|
| 793 |
+
api_type = "openai"
|
| 794 |
+
|
| 795 |
+
# Disable local options if in Spaces
|
| 796 |
+
if IS_SPACES and "Local" in api_preset:
|
| 797 |
+
st.warning("⚠️ Local models not available in Spaces. Use cloud APIs or HF Inference API.")
|
| 798 |
+
|
| 799 |
+
api_base_url = st.text_input(
|
| 800 |
+
"Base URL",
|
| 801 |
+
value=default_base_url,
|
| 802 |
+
help="API endpoint base URL",
|
| 803 |
+
disabled=IS_SPACES and "Local" in api_preset
|
| 804 |
+
)
|
| 805 |
+
|
| 806 |
+
with col_api2:
|
| 807 |
+
# Check for API key in environment (HF Secrets)
|
| 808 |
+
env_key = None
|
| 809 |
+
if IS_SPACES:
|
| 810 |
+
if api_preset == "OpenAI":
|
| 811 |
+
env_key = os.getenv("OPENAI_API_KEY")
|
| 812 |
+
elif api_preset == "Anthropic (Claude)":
|
| 813 |
+
env_key = os.getenv("ANTHROPIC_API_KEY")
|
| 814 |
+
elif api_preset == "Google (Gemini)":
|
| 815 |
+
env_key = os.getenv("GOOGLE_API_KEY")
|
| 816 |
+
elif api_preset == "Hugging Face Inference API (Free)":
|
| 817 |
+
env_key = os.getenv("HF_TOKEN")
|
| 818 |
+
|
| 819 |
+
if env_key:
|
| 820 |
+
st.success(f"✓ Using API key from Space secrets")
|
| 821 |
+
api_key = env_key
|
| 822 |
+
show_input = False
|
| 823 |
+
else:
|
| 824 |
+
show_input = True
|
| 825 |
+
|
| 826 |
+
if show_input:
|
| 827 |
+
api_key = st.text_input(
|
| 828 |
+
"API Key" + (" (optional for local)" if not needs_key else ""),
|
| 829 |
+
type="password",
|
| 830 |
+
help="Your API key (not required for local models)",
|
| 831 |
+
placeholder="sk-..." if needs_key else "not needed for local models"
|
| 832 |
+
)
|
| 833 |
+
|
| 834 |
+
model_name = st.text_input(
|
| 835 |
+
"Model Name",
|
| 836 |
+
value=default_model,
|
| 837 |
+
help="Model identifier"
|
| 838 |
+
)
|
| 839 |
+
|
| 840 |
+
col_temp, col_num = st.columns(2)
|
| 841 |
+
with col_temp:
|
| 842 |
+
temperature = st.slider("Temperature", 0.0, 2.0, 0.7, 0.1, help="Higher = more creative")
|
| 843 |
+
with col_num:
|
| 844 |
+
# Limit responses in Spaces for performance
|
| 845 |
+
max_responses = 5 if IS_SPACES else 10
|
| 846 |
+
default_responses = 2 if IS_SPACES else 3
|
| 847 |
+
|
| 848 |
+
num_responses = st.number_input(
|
| 849 |
+
"Number of Responses",
|
| 850 |
+
min_value=1,
|
| 851 |
+
max_value=max_responses,
|
| 852 |
+
value=default_responses,
|
| 853 |
+
help=f"Generate multiple responses for comparison{' (limited in Spaces)' if IS_SPACES else ''}"
|
| 854 |
+
)
|
| 855 |
+
|
| 856 |
+
col_timeout, col_retry = st.columns(2)
|
| 857 |
+
with col_timeout:
|
| 858 |
+
# Shorter timeout for Spaces
|
| 859 |
+
default_timeout = 30 if IS_SPACES else 60
|
| 860 |
+
request_timeout = st.number_input(
|
| 861 |
+
"Request Timeout (seconds)",
|
| 862 |
+
min_value=5,
|
| 863 |
+
max_value=600,
|
| 864 |
+
value=default_timeout,
|
| 865 |
+
step=5,
|
| 866 |
+
help="Increase for slow or local models"
|
| 867 |
+
)
|
| 868 |
+
with col_retry:
|
| 869 |
+
max_retries = st.number_input(
|
| 870 |
+
"Max Retries",
|
| 871 |
+
min_value=0,
|
| 872 |
+
max_value=5,
|
| 873 |
+
value=2,
|
| 874 |
+
step=1,
|
| 875 |
+
help="Automatic retries on transient failures"
|
| 876 |
+
)
|
| 877 |
+
|
| 878 |
+
# Substrate Configuration
|
| 879 |
+
st.subheader("2. Verified Substrate (Ground Truth)")
|
| 880 |
+
st.markdown("Enter verified facts that define what is correct. One per line.")
|
| 881 |
+
substrate_input_llm = st.text_area(
|
| 882 |
+
"Substrate Facts",
|
| 883 |
+
value="The Eiffel Tower is in Paris\nWater boils at 100°C at sea level\nPython is a programming language",
|
| 884 |
+
height=120,
|
| 885 |
+
key="llm_test_substrate"
|
| 886 |
+
)
|
| 887 |
+
|
| 888 |
+
# Prompt Configuration
|
| 889 |
+
st.subheader("3. Prompt Configuration")
|
| 890 |
+
col_prompt1, col_prompt2 = st.columns([3, 1])
|
| 891 |
+
|
| 892 |
+
with col_prompt1:
|
| 893 |
+
user_prompt = st.text_area(
|
| 894 |
+
"User Prompt",
|
| 895 |
+
value="Tell me a fact about one of the topics mentioned above.",
|
| 896 |
+
height=100,
|
| 897 |
+
help="The prompt sent to the LLM"
|
| 898 |
+
)
|
| 899 |
+
|
| 900 |
+
with col_prompt2:
|
| 901 |
+
use_system_prompt = st.checkbox("Use System Prompt", value=False)
|
| 902 |
+
|
| 903 |
+
if use_system_prompt:
|
| 904 |
+
system_prompt = st.text_area(
|
| 905 |
+
"System Prompt (optional)",
|
| 906 |
+
value="You are a helpful assistant. Provide accurate, factual information.",
|
| 907 |
+
height=100
|
| 908 |
+
)
|
| 909 |
+
else:
|
| 910 |
+
system_prompt = None
|
| 911 |
+
|
| 912 |
+
# Governance Configuration (Control Surface)
|
| 913 |
+
st.subheader("4. Governance Controls")
|
| 914 |
+
st.markdown("Adjust the physics strictness to see how the system responds to ambiguity vs. facts.")
|
| 915 |
+
|
| 916 |
+
gov_col1, gov_col2 = st.columns(2)
|
| 917 |
+
with gov_col1:
|
| 918 |
+
gov_preset = st.selectbox(
|
| 919 |
+
"Governance Preset",
|
| 920 |
+
["Forgiving", "Balanced", "Conservative", "Aggressive", "Mission Critical"],
|
| 921 |
+
index=1,
|
| 922 |
+
help="Sets physics parameters (Lambda/Sigma). 'Forgiving' tolerates ambiguity; 'Mission Critical' demands exact alignment."
|
| 923 |
+
)
|
| 924 |
+
preset_map = {
|
| 925 |
+
"Balanced": "balanced",
|
| 926 |
+
"Conservative": "conservative",
|
| 927 |
+
"Aggressive": "aggressive",
|
| 928 |
+
"Mission Critical": "mission_critical",
|
| 929 |
+
"Forgiving": "forgiving"
|
| 930 |
+
}
|
| 931 |
+
selected_gov_preset = preset_map[gov_preset]
|
| 932 |
+
|
| 933 |
+
# Educational Display: Show the actual numbers
|
| 934 |
+
from material_field_engine import load_config
|
| 935 |
+
config_data = load_config(selected_gov_preset)
|
| 936 |
+
|
| 937 |
+
st.markdown("---")
|
| 938 |
+
st.markdown("**Physics Parameters (active settings):**")
|
| 939 |
+
|
| 940 |
+
p_col1, p_col2, p_col3 = st.columns(3)
|
| 941 |
+
with p_col1:
|
| 942 |
+
st.metric(
|
| 943 |
+
label="Sigma (σ) - Tolerance",
|
| 944 |
+
value=config_data['elastic_modulus_sigma'],
|
| 945 |
+
help="Field Extent. Higher (0.8) = Vague associations accepted. Lower (0.2) = Exact match required."
|
| 946 |
+
)
|
| 947 |
+
with p_col2:
|
| 948 |
+
st.metric(
|
| 949 |
+
label="Lambda Max (λ) - Pressure",
|
| 950 |
+
value=config_data['lambda_max'],
|
| 951 |
+
help="Max Exclusion Pressure. Higher (1.5+) = Crushes weak bonds (High strictness). Lower (0.5) = Gentle."
|
| 952 |
+
)
|
| 953 |
+
with p_col3:
|
| 954 |
+
st.markdown(f"**Mode**: `{config_data['elastic_modulus_mode']}`")
|
| 955 |
+
st.caption("Algorithm usually 'multiplicative' (Angle × Distance).")
|
| 956 |
+
|
| 957 |
+
st.info(f"💡 **Teacher's Note**: To prevent valid answers from being blocked (fracturing), you would **increase Sigma** (widen the net) or **decrease Lambda** (reduce the pressure).")
|
| 958 |
+
|
| 959 |
+
with gov_col2:
|
| 960 |
+
st.write("**Gate Settings**")
|
| 961 |
+
topic_gate_enabled = st.checkbox(
|
| 962 |
+
"Enable Topic Gate",
|
| 963 |
+
value=False,
|
| 964 |
+
help="Fast pre-filter. If unchecked, physics runs on everything (good for demos)."
|
| 965 |
+
)
|
| 966 |
+
ambiguity_detection = st.checkbox(
|
| 967 |
+
"Allow Clarifications",
|
| 968 |
+
value=True,
|
| 969 |
+
help="If ON, clarifying questions ('Could you specify?') are marked CLARIFY instead of failing."
|
| 970 |
+
)
|
| 971 |
+
|
| 972 |
+
# Run Button
|
| 973 |
+
if st.button("🚀 Generate & Test Responses", type="primary"):
|
| 974 |
+
# Parse substrate
|
| 975 |
+
substrate_list = [line.strip() for line in substrate_input_llm.split('\n') if line.strip()]
|
| 976 |
+
|
| 977 |
+
if not substrate_list:
|
| 978 |
+
st.error("Please provide substrate facts")
|
| 979 |
+
elif not user_prompt:
|
| 980 |
+
st.error("Please provide a user prompt")
|
| 981 |
+
elif not api_base_url.strip():
|
| 982 |
+
st.error("Please provide a base URL for the API")
|
| 983 |
+
elif not model_name.strip():
|
| 984 |
+
st.error("Please provide a model name")
|
| 985 |
+
else:
|
| 986 |
+
st.info(f"📡 Generating {num_responses} response(s) from {api_preset}...")
|
| 987 |
+
from urllib.parse import urlparse
|
| 988 |
+
import socket
|
| 989 |
+
parsed_url = urlparse(api_base_url)
|
| 990 |
+
host = parsed_url.hostname
|
| 991 |
+
port = parsed_url.port or (443 if parsed_url.scheme == "https" else 80)
|
| 992 |
+
if not host:
|
| 993 |
+
st.error("Invalid base URL. Please include scheme (http/https) and host.")
|
| 994 |
+
st.stop()
|
| 995 |
+
try:
|
| 996 |
+
with socket.create_connection((host, port), timeout=3):
|
| 997 |
+
pass
|
| 998 |
+
except OSError as exc:
|
| 999 |
+
st.error(f"Unable to connect to {api_base_url}: {exc}")
|
| 1000 |
+
if "localhost" in api_base_url or "127.0.0.1" in api_base_url:
|
| 1001 |
+
st.info("For local providers, ensure the server is running (e.g., `ollama serve`).")
|
| 1002 |
+
st.stop()
|
| 1003 |
+
|
| 1004 |
+
try:
|
| 1005 |
+
import openai
|
| 1006 |
+
|
| 1007 |
+
# Configure client
|
| 1008 |
+
api_key_value = api_key.strip() if api_key else ""
|
| 1009 |
+
if not needs_key and not api_key_value:
|
| 1010 |
+
api_key_value = "local"
|
| 1011 |
+
|
| 1012 |
+
client = openai.OpenAI(
|
| 1013 |
+
api_key=api_key_value,
|
| 1014 |
+
base_url=api_base_url,
|
| 1015 |
+
timeout=request_timeout,
|
| 1016 |
+
max_retries=max_retries
|
| 1017 |
+
)
|
| 1018 |
+
|
| 1019 |
+
responses = []
|
| 1020 |
+
|
| 1021 |
+
with st.spinner(f"Calling LLM API ({num_responses} request(s))..."):
|
| 1022 |
+
for i in range(num_responses):
|
| 1023 |
+
messages = []
|
| 1024 |
+
if system_prompt:
|
| 1025 |
+
messages.append({"role": "system", "content": system_prompt})
|
| 1026 |
+
messages.append({"role": "user", "content": user_prompt})
|
| 1027 |
+
|
| 1028 |
+
try:
|
| 1029 |
+
response = client.chat.completions.create(
|
| 1030 |
+
model=model_name,
|
| 1031 |
+
messages=messages,
|
| 1032 |
+
temperature=temperature
|
| 1033 |
+
)
|
| 1034 |
+
responses.append(response.choices[0].message.content)
|
| 1035 |
+
except openai.APITimeoutError as e:
|
| 1036 |
+
st.error(f"Request timed out on response {i+1}. Try increasing the timeout.")
|
| 1037 |
+
raise
|
| 1038 |
+
except openai.APIConnectionError as e:
|
| 1039 |
+
st.error(f"Connection error on response {i+1}. Check base URL and server status.")
|
| 1040 |
+
raise
|
| 1041 |
+
except openai.OpenAIError as e:
|
| 1042 |
+
st.error(f"OpenAI API error on response {i+1}: {str(e)}")
|
| 1043 |
+
raise
|
| 1044 |
+
except Exception as e:
|
| 1045 |
+
st.error(f"Error on response {i+1}: {str(e)}")
|
| 1046 |
+
if i == 0: # If first call fails, stop
|
| 1047 |
+
raise
|
| 1048 |
+
|
| 1049 |
+
if not responses:
|
| 1050 |
+
st.error("No responses generated")
|
| 1051 |
+
else:
|
| 1052 |
+
st.success(f"✓ Generated {len(responses)} response(s)")
|
| 1053 |
+
|
| 1054 |
+
# Now run the guardrail
|
| 1055 |
+
st.markdown("---")
|
| 1056 |
+
|
| 1057 |
+
with st.spinner("Running Deterministic Guardrail..."):
|
| 1058 |
+
from llm_adapter import DeterministicGuardrail, DeterministicHashEmbedderND
|
| 1059 |
+
import math
|
| 1060 |
+
|
| 1061 |
+
guard = DeterministicGuardrail(
|
| 1062 |
+
substrate_texts=substrate_list,
|
| 1063 |
+
config_preset=selected_gov_preset,
|
| 1064 |
+
topic_gate_enabled=topic_gate_enabled,
|
| 1065 |
+
ambiguity_detection_enabled=ambiguity_detection
|
| 1066 |
+
)
|
| 1067 |
+
|
| 1068 |
+
inspection = guard.inspect(responses)
|
| 1069 |
+
result_text = inspection['selected_text']
|
| 1070 |
+
metrics = inspection['metrics']
|
| 1071 |
+
candidate_metrics = metrics.get('candidates')
|
| 1072 |
+
if candidate_metrics is None:
|
| 1073 |
+
candidate_metrics = [
|
| 1074 |
+
{
|
| 1075 |
+
'phase_log': [],
|
| 1076 |
+
'fractured': False,
|
| 1077 |
+
'fractured_step': None,
|
| 1078 |
+
'stress': 0.0,
|
| 1079 |
+
'hash': 'N/A',
|
| 1080 |
+
}
|
| 1081 |
+
for _ in responses
|
| 1082 |
+
]
|
| 1083 |
+
|
| 1084 |
+
# Build detailed numbers view (high-D physics + 2D projection for plot)
|
| 1085 |
+
embedder = DeterministicHashEmbedderND()
|
| 1086 |
+
sub_vecs = [embedder.embed(t) for t in substrate_list]
|
| 1087 |
+
resp_vecs = [embedder.embed(t) for t in responses]
|
| 1088 |
+
sub_vecs_2d = [embedder.project_2d(v) for v in sub_vecs]
|
| 1089 |
+
resp_vecs_2d = [embedder.project_2d(v) for v in resp_vecs]
|
| 1090 |
+
|
| 1091 |
+
def cosine_similarity(v1, v2):
    """Return the cosine of the angle between v1 and v2 (0.0 if either is zero-length)."""
    dot_product = 0.0
    for a, b in zip(v1, v2):
        dot_product += a * b

    norm_a_sq = 0.0
    for a in v1:
        norm_a_sq += a * a
    norm_b_sq = 0.0
    for b in v2:
        norm_b_sq += b * b

    norm_a = math.sqrt(norm_a_sq)
    norm_b = math.sqrt(norm_b_sq)

    # A zero vector has no direction; define similarity as 0.0 rather than
    # dividing by zero.
    if norm_a == 0 or norm_b == 0:
        return 0.0
    return dot_product / (norm_a * norm_b)
|
| 1098 |
+
|
| 1099 |
+
def euclidean_distance(v1, v2):
    """Return the straight-line (L2) distance between points v1 and v2."""
    squared_diffs = [(a - b) ** 2 for a, b in zip(v1, v2)]
    return math.sqrt(sum(squared_diffs))
|
| 1101 |
+
|
| 1102 |
+
# Display result
|
| 1103 |
+
st.subheader("Guardrail Decision")
|
| 1104 |
+
if result_text:
|
| 1105 |
+
st.success("🟢 **SELECTED RESPONSE**")
|
| 1106 |
+
st.markdown(f"> {result_text}")
|
| 1107 |
+
else:
|
| 1108 |
+
st.warning("🔴 **ABSTAINED** - All responses fractured under stress")
|
| 1109 |
+
st.caption("The guardrail rejected all responses. None met the yield strength requirements.")
|
| 1110 |
+
|
| 1111 |
+
# Show all responses with scores
|
| 1112 |
+
st.markdown("---")
|
| 1113 |
+
st.subheader("Detailed Scoring for All Responses")
|
| 1114 |
+
|
| 1115 |
+
for i, (response, cand_vec) in enumerate(zip(responses, resp_vecs)):
|
| 1116 |
+
cand_metrics = candidate_metrics[i]
|
| 1117 |
+
is_selected = (result_text == response)
|
| 1118 |
+
|
| 1119 |
+
# Compute alignment to best substrate
|
| 1120 |
+
best_alignment = -1
|
| 1121 |
+
best_substrate = None
|
| 1122 |
+
best_cos_sim = 0
|
| 1123 |
+
|
| 1124 |
+
for j, sub_vec in enumerate(sub_vecs):
|
| 1125 |
+
cos_sim = cosine_similarity(cand_vec, sub_vec)
|
| 1126 |
+
alignment = (cos_sim + 1.0) / 2.0
|
| 1127 |
+
if alignment > best_alignment:
|
| 1128 |
+
best_alignment = alignment
|
| 1129 |
+
best_substrate = substrate_list[j]
|
| 1130 |
+
best_cos_sim = cos_sim
|
| 1131 |
+
|
| 1132 |
+
# Display card
|
| 1133 |
+
|
| 1134 |
+
fractured = cand_metrics.get('fractured', False)
|
| 1135 |
+
out_of_domain = cand_metrics.get('out_of_domain', False)
|
| 1136 |
+
if is_selected:
|
| 1137 |
+
status = "SELECTED"
|
| 1138 |
+
elif out_of_domain:
|
| 1139 |
+
status = "EXCLUDED (Topic Gate)"
|
| 1140 |
+
elif fractured:
|
| 1141 |
+
status = "EXCLUDED (Fractured)"
|
| 1142 |
+
else:
|
| 1143 |
+
status = "SURVIVED (Not Selected)"
|
| 1144 |
+
|
| 1145 |
+
with st.expander(f"**Response {i+1}** - {status}", expanded=is_selected):
|
| 1146 |
+
st.markdown(f"**Full Response:**")
|
| 1147 |
+
st.info(response)
|
| 1148 |
+
st.markdown("---")
|
| 1149 |
+
|
| 1150 |
+
# Metrics
|
| 1151 |
+
col1, col2, col3, col4 = st.columns(4)
|
| 1152 |
+
with col1:
|
| 1153 |
+
st.metric("Alignment Score", f"{best_alignment:.4f}", help="Normalized cosine similarity: 0=opposite, 0.5=orthogonal, 1=identical")
|
| 1154 |
+
with col2:
|
| 1155 |
+
st.metric("Cosine Similarity", f"{best_cos_sim:.4f}", help="Raw cosine similarity: -1 to 1")
|
| 1156 |
+
with col3:
|
| 1157 |
+
st.metric("Final Stress σ", f"{cand_metrics['stress']:.4f}")
|
| 1158 |
+
with col4:
|
| 1159 |
+
if out_of_domain:
|
| 1160 |
+
st.metric("Status", "Topic-gated")
|
| 1161 |
+
else:
|
| 1162 |
+
st.metric("Status", "Intact" if not fractured else "Fractured")
|
| 1163 |
+
|
| 1164 |
+
st.caption(f"**Best substrate match:** *\"{best_substrate}\"*")
|
| 1165 |
+
|
| 1166 |
+
# Show stress evolution chart
|
| 1167 |
+
st.markdown("**Stress Evolution**")
|
| 1168 |
+
phase_log = cand_metrics.get('phase_log', [])
|
| 1169 |
+
stress_data = [entry['stress'] for entry in phase_log]
|
| 1170 |
+
steps = list(range(len(stress_data)))
|
| 1171 |
+
|
| 1172 |
+
fig_stress = go.Figure()
|
| 1173 |
+
fig_stress.add_trace(go.Scatter(
|
| 1174 |
+
x=steps,
|
| 1175 |
+
y=stress_data,
|
| 1176 |
+
mode='lines+markers',
|
| 1177 |
+
name='Cumulative Stress',
|
| 1178 |
+
line=dict(color='red' if fractured else 'green', width=2),
|
| 1179 |
+
marker=dict(size=6)
|
| 1180 |
+
))
|
| 1181 |
+
|
| 1182 |
+
fig_stress.update_layout(
|
| 1183 |
+
title=f"Stress Accumulation - Response {i+1}",
|
| 1184 |
+
yaxis_title="Cumulative Stress σ(k)",
|
| 1185 |
+
xaxis_title="Inference Step k",
|
| 1186 |
+
height=300,
|
| 1187 |
+
template="plotly_dark",
|
| 1188 |
+
showlegend=True
|
| 1189 |
+
)
|
| 1190 |
+
st.plotly_chart(fig_stress, use_container_width=True)
|
| 1191 |
+
|
| 1192 |
+
# Summary metrics
|
| 1193 |
+
st.markdown("---")
|
| 1194 |
+
st.subheader("Summary Statistics")
|
| 1195 |
+
summary_cols = st.columns(4)
|
| 1196 |
+
with summary_cols[0]:
|
| 1197 |
+
st.metric("Total Responses", len(responses))
|
| 1198 |
+
with summary_cols[1]:
|
| 1199 |
+
st.metric("Excluded", metrics['total_excluded'])
|
| 1200 |
+
with summary_cols[2]:
|
| 1201 |
+
st.metric("Survived", len(responses) - metrics['total_excluded'])
|
| 1202 |
+
with summary_cols[3]:
|
| 1203 |
+
selected = 1 if result_text else 0
|
| 1204 |
+
st.metric("Selected", selected)
|
| 1205 |
+
|
| 1206 |
+
# Detailed audit trail
|
| 1207 |
+
with st.expander("📊 Complete Numerical Audit Trail (JSON)", expanded=False):
|
| 1208 |
+
st.caption("Full vectors, distances, comparisons, and stress evolution for reproducibility")
|
| 1209 |
+
|
| 1210 |
+
numbers_view = {
|
| 1211 |
+
"test_metadata": {
|
| 1212 |
+
"provider": api_preset,
|
| 1213 |
+
"base_url": api_base_url,
|
| 1214 |
+
"model": model_name,
|
| 1215 |
+
"temperature": temperature,
|
| 1216 |
+
"num_responses": len(responses),
|
| 1217 |
+
"num_substrate_facts": len(substrate_list),
|
| 1218 |
+
"prompt": user_prompt,
|
| 1219 |
+
"system_prompt": system_prompt if system_prompt else "None"
|
| 1220 |
+
},
|
| 1221 |
+
"embedder": {
|
| 1222 |
+
"name": "DeterministicHashEmbedderND",
|
| 1223 |
+
"description": "Deterministic SHA-256 based 16D projection (2D shown for plotting)",
|
| 1224 |
+
"definition": "sha256(text) -> 16D in [0,1], projected to 2D"
|
| 1225 |
+
},
|
| 1226 |
+
"substrate": [
|
| 1227 |
+
{"index": idx, "text": t, "vec2": [round(v[0], 8), round(v[1], 8)]}
|
| 1228 |
+
for idx, (t, v) in enumerate(zip(substrate_list, sub_vecs_2d))
|
| 1229 |
+
],
|
| 1230 |
+
"responses": []
|
| 1231 |
+
}
|
| 1232 |
+
|
| 1233 |
+
for i, (response, resp_vec) in enumerate(zip(responses, resp_vecs)):
|
| 1234 |
+
cand_metrics = candidate_metrics[i]
|
| 1235 |
+
|
| 1236 |
+
# Compute all substrate comparisons
|
| 1237 |
+
comparisons = []
|
| 1238 |
+
for j, sub_vec in enumerate(sub_vecs):
|
| 1239 |
+
cos_sim = cosine_similarity(resp_vec, sub_vec)
|
| 1240 |
+
alignment = (cos_sim + 1.0) / 2.0
|
| 1241 |
+
dist = euclidean_distance(resp_vec, sub_vec)
|
| 1242 |
+
|
| 1243 |
+
comparisons.append({
|
| 1244 |
+
"substrate_index": j,
|
| 1245 |
+
"substrate_text": substrate_list[j],
|
| 1246 |
+
"cosine_similarity": round(cos_sim, 8),
|
| 1247 |
+
"alignment_0_1": round(alignment, 8),
|
| 1248 |
+
"euclidean_distance": round(dist, 8),
|
| 1249 |
+
})
|
| 1250 |
+
|
| 1251 |
+
numbers_view["responses"].append({
|
| 1252 |
+
"response_index": i,
|
| 1253 |
+
"text": response,
|
| 1254 |
+
"vec2": [round(resp_vecs_2d[i][0], 8), round(resp_vecs_2d[i][1], 8)],
|
| 1255 |
+
"substrate_comparisons": comparisons,
|
| 1256 |
+
"engine_results": {
|
| 1257 |
+
"fractured": cand_metrics.get('fractured', False),
|
| 1258 |
+
"fractured_at_step": cand_metrics.get('fractured_step'),
|
| 1259 |
+
"final_stress": round(float(cand_metrics['stress']), 8),
|
| 1260 |
+
"determinism_hash": cand_metrics.get('hash', 'N/A')
|
| 1261 |
+
}
|
| 1262 |
+
})
|
| 1263 |
+
|
| 1264 |
+
st.json(numbers_view)
|
| 1265 |
+
|
| 1266 |
+
except ImportError:
|
| 1267 |
+
st.error("Missing `openai` library. Install with: `pip install openai`")
|
| 1268 |
+
except Exception as e:
|
| 1269 |
+
st.error(f"Error: {str(e)}")
|
| 1270 |
+
if "401" in str(e) or "authentication" in str(e).lower():
|
| 1271 |
+
st.info("💡 Check your API key and make sure it's valid for the selected endpoint")
|
| 1272 |
+
elif "404" in str(e) or "not found" in str(e).lower():
|
| 1273 |
+
st.info("💡 Check your model name and base URL. For local models, make sure the server is running.")
|
| 1274 |
+
st.exception(e)
|
| 1275 |
+
|
| 1276 |
+
# ----------------------------------------------------------------------------
|
| 1277 |
+
# TAB 4: Explain & Tune
|
| 1278 |
+
# ----------------------------------------------------------------------------
|
| 1279 |
+
with tab4:
|
| 1280 |
+
st.header("🔧 Interactive Parameter Tuning")
|
| 1281 |
+
st.markdown("Select parameters to visualize their combined effect on the governance physics.")
|
| 1282 |
+
|
| 1283 |
+
# 1. State Tracking for "Active Explanation"
|
| 1284 |
+
if 'last_params' not in st.session_state:
|
| 1285 |
+
st.session_state.last_params = {
|
| 1286 |
+
'nuc': 0.4, 'quench': 0.75, 'lam': 1.2, 'yield': 1.5, 'align': 0.85, 'dist': 0.3
|
| 1287 |
+
}
|
| 1288 |
+
if 'active_topic' not in st.session_state:
|
| 1289 |
+
st.session_state.active_topic = "General"
|
| 1290 |
+
|
| 1291 |
+
# 1. Controls
|
| 1292 |
+
exp_col1, exp_col2 = st.columns([1, 2])
|
| 1293 |
+
|
| 1294 |
+
with exp_col1:
|
| 1295 |
+
st.subheader("Controls")
|
| 1296 |
+
st.caption("⚠️ **Educational Visualization**: This simulation uses the production physics engine but is intended for parameter intuition.")
|
| 1297 |
+
|
| 1298 |
+
# Multi-select for visualization layers
|
| 1299 |
+
focused_params = st.multiselect(
|
| 1300 |
+
"Visualized Layers",
|
| 1301 |
+
options=["Nucleation Phase", "Quenching Phase", "Max Pressure (lambda)", "Yield Strength (sigma_y)"],
|
| 1302 |
+
default=["Max Pressure (lambda)"],
|
| 1303 |
+
help="Select multiple layers to see how they interact"
|
| 1304 |
+
)
|
| 1305 |
+
|
| 1306 |
+
st.markdown("---")
|
| 1307 |
+
st.markdown("**Physics Parameters**")
|
| 1308 |
+
|
| 1309 |
+
# Sliders with callbacks to update active topic
|
| 1310 |
+
e_nuc = st.slider("Nucleation Fraction", 0.05, 0.9, 0.4, 0.05, key="slider_nuc")
|
| 1311 |
+
e_quench = st.slider("Quenching Fraction", 0.05, 0.95, 0.75, 0.05, key="slider_quench")
|
| 1312 |
+
e_lam = st.slider("Lambda Max (lambda)", 0.1, 4.0, 1.2, 0.1, key="slider_lam")
|
| 1313 |
+
e_yield = st.slider("Yield Strength (sigma_y)", 0.1, 5.0, 1.5, 0.1, key="slider_yield")
|
| 1314 |
+
|
| 1315 |
+
st.markdown("**Theoretical Simulation**")
|
| 1316 |
+
sim_align = st.slider("Target Alignment", 0.0, 1.0, 0.85, 0.01, key="slider_align")
|
| 1317 |
+
sim_dist = st.slider("Target Distance", 0.0, 2.0, 0.3, 0.01, key="slider_dist")
|
| 1318 |
+
|
| 1319 |
+
# ... (Detection logic remains same)
|
| 1320 |
+
current_params = {
|
| 1321 |
+
'nuc': e_nuc, 'quench': e_quench, 'lam': e_lam,
|
| 1322 |
+
'yield': e_yield, 'align': sim_align, 'dist': sim_dist
|
| 1323 |
+
}
|
| 1324 |
+
for param, val in current_params.items():
|
| 1325 |
+
if val != st.session_state.last_params.get(param):
|
| 1326 |
+
st.session_state.active_topic = param
|
| 1327 |
+
st.session_state.last_params[param] = val
|
| 1328 |
+
break
|
| 1329 |
+
|
| 1330 |
+
# 2. Simulation Logic (Production Backend)
|
| 1331 |
+
def run_simulation(steps, nuc, quench, lam_max, yld, align, dist):
    """Run a single-candidate trace through the production physics engine.

    Drives the tuning visualization from the real MaterialFieldEngine so the
    chart reflects actual engine behavior rather than a re-implementation.

    Args:
        steps: Total number of inference steps for the engine timeline.
        nuc: Nucleation fraction (0-1) applied to the phase controller.
        quench: Quenching fraction (0-1) applied to the phase controller.
        lam_max: Maximum exclusion pressure (lambda_max) for the engine.
        yld: Yield strength forced onto the candidate for the demo.
        align: Target alignment slider value.
            NOTE(review): 'align' is not referenced anywhere in this body --
            only 'dist' positions the candidate. Confirm whether alignment
            should also feed the simulation or the parameter can be dropped.
        dist: Distance of the candidate from the reference point (1.0, 1.0).

    Returns:
        A tuple (history, n_end, q_end) where history is a list of dicts with
        per-step 'step', 'stress', 'lambda', 'phase' values, and n_end/q_end
        are the step indices where nucleation and quenching phases end.
    """
    from material_field_engine import MaterialFieldEngine, VerifiedSubstrate, Vector2D, fp_from_float

    # To strictly follow the "one source of truth" principle, we drive the
    # visualization from the exact same engine code used in production.

    # Substrate config is read from session state so the demo tracks whatever
    # preset the user currently has active (with sensible fallbacks).
    substrate = VerifiedSubstrate(
        elastic_modulus_mode=st.session_state.config.get('elastic_modulus_mode', 'multiplicative'),
        elastic_modulus_sigma=st.session_state.config.get('elastic_modulus_sigma', 0.5)
    )
    # Add a reference point for alignment calculations
    substrate.add_verified_state(Vector2D(x=1.0, y=1.0, properties=None))

    engine = MaterialFieldEngine(
        substrate,
        lambda_max=lam_max,
        inference_steps=steps
    )
    # Override the phase boundaries directly so the sliders take effect.
    engine.phase_controller.nucleation_threshold = nuc
    engine.phase_controller.quenching_threshold = quench

    # Initialize candidate at distance 'dist' from the reference
    engine.initialize_candidates([[1.0 + dist, 1.0]])

    # Force yield strength to match slider for educational clarity
    if engine.candidate_vectors:
        engine.candidate_vectors[0].properties.yield_strength_q = fp_from_float(yld)

    res = engine.run_inference(collect_trace=True)

    # Flatten the engine's phase_log for the single candidate into the
    # plotting-friendly history format used by the visualization below.
    history = []
    if res.get('candidates'):
        cand_trace = res['candidates'][0]
        for entry in cand_trace['phase_log']:
            history.append({
                "step": entry['step'],
                "stress": entry['stress'],
                "lambda": entry['pressure'],
                "phase": entry['phase']
            })

    # Phase boundary markers (step indices) for shading the chart regions.
    n_end = int(steps * nuc)
    q_end = int(steps * quench)
    return history, n_end, q_end
|
| 1375 |
+
|
| 1376 |
+
sim_data, sim_n_end, sim_q_end = run_simulation(20, e_nuc, e_quench, e_lam, e_yield, sim_align, sim_dist)
|
| 1377 |
+
|
| 1378 |
+
|
| 1379 |
+
# 3. Visualization (Plotly)
|
| 1380 |
+
with exp_col2:
|
| 1381 |
+
st.subheader("Effect Visualization")
|
| 1382 |
+
|
| 1383 |
+
fig = go.Figure()
|
| 1384 |
+
|
| 1385 |
+
steps = [d['step'] for d in sim_data]
|
| 1386 |
+
stress = [d['stress'] for d in sim_data]
|
| 1387 |
+
lams = [d['lambda'] for d in sim_data]
|
| 1388 |
+
|
| 1389 |
+
# Base Curves
|
| 1390 |
+
fig.add_trace(go.Scatter(x=steps, y=stress, name='Stress σ', line=dict(color='blue', width=3)))
|
| 1391 |
+
fig.add_trace(go.Scatter(x=steps, y=lams, name='Pressure λ', line=dict(color='green', width=2, dash='dash')))
|
| 1392 |
+
|
| 1393 |
+
# Interactive Layers
|
| 1394 |
+
if "Nucleation Phase" in focused_params or st.session_state.active_topic == 'nuc':
|
| 1395 |
+
fig.add_vrect(x0=0, x1=sim_n_end, fillcolor="yellow", opacity=0.15, annotation_text="Nucleation", annotation_position="top left")
|
| 1396 |
+
|
| 1397 |
+
if "Quenching Phase" in focused_params or st.session_state.active_topic == 'quench':
|
| 1398 |
+
fig.add_vrect(x0=sim_n_end, x1=sim_q_end, fillcolor="orange", opacity=0.15, annotation_text="Quenching", annotation_position="top left")
|
| 1399 |
+
|
| 1400 |
+
if "Max Pressure (lambda)" in focused_params or st.session_state.active_topic == 'lam':
|
| 1401 |
+
fig.add_hline(y=e_lam, line_color="green", line_dash="dot", annotation_text=f"Max Pressure {e_lam}", annotation_position="bottom right")
|
| 1402 |
+
|
| 1403 |
+
if "Yield Strength (sigma_y)" in focused_params or st.session_state.active_topic == 'yield':
|
| 1404 |
+
fig.add_hline(y=e_yield, line_color="red", line_width=3, annotation_text=f"Yield {e_yield}")
|
| 1405 |
+
|
| 1406 |
+
fig.update_layout(
|
| 1407 |
+
title="Governance Physics Simulation",
|
| 1408 |
+
xaxis_title="Time Steps",
|
| 1409 |
+
yaxis_title="Magnitude",
|
| 1410 |
+
height=400,
|
| 1411 |
+
margin=dict(l=20, r=20, t=40, b=20),
|
| 1412 |
+
hovermode="x unified"
|
| 1413 |
+
)
|
| 1414 |
+
st.plotly_chart(fig, use_container_width=True)
|
| 1415 |
+
|
| 1416 |
+
# 4. Deep Dive Explanation (Dynamic)
|
| 1417 |
+
st.subheader(f"Deep Dive: {st.session_state.active_topic.upper()}")
|
| 1418 |
+
|
| 1419 |
+
topic = st.session_state.active_topic
|
| 1420 |
+
if topic == 'nuc':
|
| 1421 |
+
st.info("""
|
| 1422 |
+
**WHAT**: Nucleation Fraction (Time available for initial alignment).
|
| 1423 |
+
|
| 1424 |
+
**HOW**: Defines the percentage of the timeline (steps 0 to N) where the system is "listening" before applying significant pressure.
|
| 1425 |
+
|
| 1426 |
+
**WHY**:
|
| 1427 |
+
- **Too Short**: The system acts impulsively, fracturing valid ideas before they can stabilize.
|
| 1428 |
+
- **Too Long**: The system dithers, allowing hallucinations to persist too long.
|
| 1429 |
+
|
| 1430 |
+
**WHO**: Tuned by governance architects to match the "patience" required for the domain (e.g., Creative writing needs long nucleation; Safety systems need short).
|
| 1431 |
+
""")
|
| 1432 |
+
elif topic == 'quench':
|
| 1433 |
+
st.info("""
|
| 1434 |
+
**WHAT**: Quenching Fraction (The annealing window).
|
| 1435 |
+
|
| 1436 |
+
**HOW**: Defines the period where pressure ramps up linearly to testing levels.
|
| 1437 |
+
|
| 1438 |
+
**WHY**: This is the "soft filter" phase. Weak candidates (low alignment) are slowly crushed here, while strong candidates gain strength to survive the final crystallization.
|
| 1439 |
+
""")
|
| 1440 |
+
elif topic == 'lam':
|
| 1441 |
+
st.info("""
|
| 1442 |
+
**WHAT**: Lambda Max (λ_max) - The Maximum Exclusion Pressure.
|
| 1443 |
+
|
| 1444 |
+
**HOW**: Represents the "weight" of the governance mechanism. It is a multiplier on the error signal.
|
| 1445 |
+
|
| 1446 |
+
**WHY**:
|
| 1447 |
+
- **High (1.5+)**: "Mission Critical" mode. Even minor deviations cause instant fracture.
|
| 1448 |
+
- **Low (0.5)**: "Forgiving" mode. Only egregious hallucinations are blocked.
|
| 1449 |
+
|
| 1450 |
+
**RELATION**: Stress = λ * (1 - Alignment). If λ is huge, even 99% alignment might not be enough.
|
| 1451 |
+
""")
|
| 1452 |
+
elif topic == 'yield':
|
| 1453 |
+
st.info("""
|
| 1454 |
+
**WHAT**: Yield Strength (σ_y) - The Breaking Point.
|
| 1455 |
+
|
| 1456 |
+
**HOW**: A hard threshold. If accumulated Stress > Yield, the candidate is Rejected (Fractured).
|
| 1457 |
+
|
| 1458 |
+
**WHY**: This defines the ultimate binary decision boundary.
|
| 1459 |
+
|
| 1460 |
+
**IMPACT**: Raising this bar makes the system more "resilient" (harder to fracture). Lowering it makes it "brittle" (easy to fracture).
|
| 1461 |
+
""")
|
| 1462 |
+
elif topic in ['align', 'dist']:
|
| 1463 |
+
st.info("""
|
| 1464 |
+
**WHAT**: Candidate Properties (Hypothetical Input).
|
| 1465 |
+
|
| 1466 |
+
**HOW**:
|
| 1467 |
+
- **Alignment**: How semantically close the LLM output is to the Verified Substrate (1.0 = Perfect).
|
| 1468 |
+
- **Distance**: The spatial distance in the high-dimensional RBF field (0.0 = Perfect).
|
| 1469 |
+
|
| 1470 |
+
**WHY**: Use these sliders to test *what if* scenarios. "What if the LLM produces a weak answer (Align=0.4)? Will it survive the current Lambda setting?"
|
| 1471 |
+
""")
|
| 1472 |
+
else:
|
| 1473 |
+
st.markdown("*Adjust any slider on the left to see a detailed breakdown of its function.*")
|
| 1474 |
+
|
| 1475 |
+
|
| 1476 |
+
# ============================================================================
|
| 1477 |
+
# Compact Footer
|
| 1478 |
+
# ============================================================================
|
| 1479 |
+
|
| 1480 |
+
st.markdown("---")
|
| 1481 |
+
st.caption("© 2026 Verhash LLC | Deterministic Governance Reference Implementation")
|
| 1482 |
+
#!/usr/bin/env python3
|
| 1483 |
+
"""
|
| 1484 |
+
Deterministic Exclusion Demo GUI (Development Build)
|
| 1485 |
+
FastAPI + Streamlit implementation for rapid prototyping
|
| 1486 |
+
|
| 1487 |
+
Usage:
|
| 1488 |
+
streamlit run demo_gui_dev.py
|
| 1489 |
+
"""
|
| 1490 |
+
|
| 1491 |
+
# Guard the optional GUI dependencies: fail fast with install instructions
# instead of a raw ModuleNotFoundError traceback.
# NOTE(review): this guard appears mid-file, *after* st.* has already been
# used above — it looks like a second copy of the script was appended during
# a merge; confirm and deduplicate.
try:
    import streamlit as st
    import plotly.graph_objects as go
    import plotly.express as px
except ModuleNotFoundError as exc:
    # Extract the missing module name from the exception message,
    # stripping the surrounding quotes.
    missing = str(exc).split("No module named ", 1)[-1].strip("'\"")
    print(f"Missing optional GUI dependency: {missing}")
    print("Install GUI deps: python -m pip install -r requirements-gui.txt")
    print("Run the GUI via: streamlit run demo_gui_dev.py")
    # Exit code 2: conventional "usage / environment" error.
    raise SystemExit(2)
|
| 1501 |
+
|
| 1502 |
+
# Detect whether we are running under the Streamlit runtime. The import path
# is a private Streamlit API and may move between versions, hence the broad
# fallback to None when it cannot be imported.
try:
    from streamlit.runtime.scriptrunner.script_run_context import get_script_run_ctx
except Exception:
    get_script_run_ctx = None

# If the context helper is available but returns None, the file was launched
# with plain `python` instead of `streamlit run` — print usage and exit.
if get_script_run_ctx is not None and get_script_run_ctx() is None:
    print("This file is a Streamlit app and must be run with Streamlit:")
    print(" streamlit run demo_gui_dev.py")
    raise SystemExit(2)
|
| 1511 |
+
import time
import hashlib
import json
from pathlib import Path
from collections import deque
from typing import List, Dict, Optional, Any, Tuple
import os

# Hugging Face Spaces detection: the SPACE_ID env var is set only when the
# app is hosted on HF Spaces, so its presence distinguishes hosted vs. local.
IS_SPACES = os.getenv("SPACE_ID") is not None
|
| 1521 |
+
|
| 1522 |
+
# Page Config MUST be the first Streamlit command
# NOTE(review): in this file st.markdown/st.caption calls appear *above* this
# point, so this st.set_page_config will raise StreamlitAPIException at
# runtime — looks like a duplicated script was appended; confirm and remove
# one copy.
st.set_page_config(
    page_title="Deterministic Exclusion Demo",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        'Get Help': 'https://github.com/Rymley/Deterministic-Governance-Mechanism',
        'Report a bug': "https://github.com/Rymley/Deterministic-Governance-Mechanism/issues",
        'About': "Deterministic Governance Mechanism by Verhash LLC"
    }
)
|
| 1533 |
+
|
| 1534 |
+
# Import engine components
import sys
# Make sibling modules importable regardless of the working directory the
# app was launched from.
sys.path.insert(0, str(Path(__file__).parent))

from material_field_engine import (
    VerifiedSubstrate, Vector2D, MaterialFieldEngine, fp_from_float, fp_to_float, load_config
)
from exclusion_demo import run_deterministic_exclusion_demo
|
| 1542 |
+
|
| 1543 |
+
@st.cache_data
def compute_substrate_embeddings_2d(substrate_list: List[str]) -> List[List[float]]:
    """Embed each substrate string into 2-D (cached across Streamlit reruns).

    These low-dimensional vectors feed the material-field visualization;
    the physics engine uses the separate 16-D helper.
    """
    # Local import keeps the optional adapter off the module import path.
    from llm_adapter import DeterministicHashEmbedderND

    embed = DeterministicHashEmbedderND(dim=2).embed
    return list(map(embed, substrate_list))
|
| 1549 |
+
|
| 1550 |
+
@st.cache_data
def compute_substrate_embeddings_highd(substrate_list: List[str]) -> List[List[float]]:
    """Embed each substrate string into 16-D (cached across Streamlit reruns).

    These high-dimensional vectors are the ones consumed by the physics
    engine logic (the 2-D variant is for visualization only).
    """
    # Local import keeps the optional adapter off the module import path.
    from llm_adapter import DeterministicHashEmbedderND

    hash_embedder = DeterministicHashEmbedderND(dim=16)
    vectors: List[List[float]] = []
    for text in substrate_list:
        vectors.append(hash_embedder.embed(text))
    return vectors
|
| 1556 |
+
|
| 1557 |
+
|
| 1558 |
+
# Custom CSS for monospace hash display
|
| 1559 |
+
|
| 1560 |
+
st.markdown("""
|
| 1561 |
+
<style>
|
| 1562 |
+
.hash-display {
|
| 1563 |
+
font-family: 'Courier New', monospace;
|
| 1564 |
+
font-size: 14px;
|
| 1565 |
+
background-color: #1e1e1e;
|
| 1566 |
+
color: #00ff00;
|
| 1567 |
+
padding: 10px;
|
| 1568 |
+
border-radius: 5px;
|
| 1569 |
+
word-break: break-all;
|
| 1570 |
+
}
|
| 1571 |
+
|
| 1572 |
+
/* Verhash Brand Colors - Professional Slate & Blue */
|
| 1573 |
+
.stApp {
|
| 1574 |
+
background-color: #020617; /* Slate 950 */
|
| 1575 |
+
color: #f1f5f9; /* Slate 100 */
|
| 1576 |
+
}
|
| 1577 |
+
|
| 1578 |
+
/* Headers */
|
| 1579 |
+
h1, h2, h3 {
|
| 1580 |
+
color: #f1f5f9 !important; /* Slate 100 */
|
| 1581 |
+
font-weight: 700 !important;
|
| 1582 |
+
}
|
| 1583 |
+
|
| 1584 |
+
/* Links */
|
| 1585 |
+
a {
|
| 1586 |
+
color: #005981 !important; /* Sky 500 */
|
| 1587 |
+
text-decoration: none !important;
|
| 1588 |
+
}
|
| 1589 |
+
a:hover {
|
| 1590 |
+
color: #38bdf8 !important; /* Sky 400 */
|
| 1591 |
+
text-decoration: underline !important;
|
| 1592 |
+
}
|
| 1593 |
+
|
| 1594 |
+
/* Button styling */
|
| 1595 |
+
.stButton > button {
|
| 1596 |
+
background-color: #005981;
|
| 1597 |
+
color: #f8fafc;
|
| 1598 |
+
border: none;
|
| 1599 |
+
font-weight: 600;
|
| 1600 |
+
border-radius: 6px;
|
| 1601 |
+
transition: all 0.2s ease;
|
| 1602 |
+
}
|
| 1603 |
+
|
| 1604 |
+
.stButton > button:hover {
|
| 1605 |
+
background-color: #38bdf8;
|
| 1606 |
+
color: #f8fafc;
|
| 1607 |
+
box-shadow: 0 4px 12px rgba(14, 165, 233, 0.2);
|
| 1608 |
+
}
|
| 1609 |
+
|
| 1610 |
+
/* Sidebar branding */
|
| 1611 |
+
[data-testid="stSidebar"] {
|
| 1612 |
+
background-color: #0f172a; /* Slate 900 */
|
| 1613 |
+
border-right: 1px solid #1e293b;
|
| 1614 |
+
}
|
| 1615 |
+
|
| 1616 |
+
/* Metric styling */
|
| 1617 |
+
[data-testid="stMetricValue"] {
|
| 1618 |
+
color: #005981;
|
| 1619 |
+
}
|
| 1620 |
+
|
| 1621 |
+
.main-header {
|
| 1622 |
+
text-align: center;
|
| 1623 |
+
padding: 40px 0 20px 0;
|
| 1624 |
+
margin-bottom: 20px;
|
| 1625 |
+
}
|
| 1626 |
+
.company-name {
|
| 1627 |
+
color: #64748b; /* Slate 500 */
|
| 1628 |
+
font-size: 16px;
|
| 1629 |
+
letter-spacing: 2px;
|
| 1630 |
+
text-transform: uppercase;
|
| 1631 |
+
margin-top: 8px;
|
| 1632 |
+
}
|
| 1633 |
+
.nav-links {
|
| 1634 |
+
text-align: center;
|
| 1635 |
+
padding: 15px 0;
|
| 1636 |
+
background: rgba(15, 23, 42, 0.3);
|
| 1637 |
+
border-radius: 8px;
|
| 1638 |
+
border: 1px solid #1e293b;
|
| 1639 |
+
margin-bottom: 30px;
|
| 1640 |
+
}
|
| 1641 |
+
.nav-links a {
|
| 1642 |
+
color: #005981;
|
| 1643 |
+
padding: 0 20px;
|
| 1644 |
+
font-size: 14px;
|
| 1645 |
+
}
|
| 1646 |
+
|
| 1647 |
+
/* Tab Styling */
|
| 1648 |
+
.stTabs [data-baseweb="tab-list"] {
|
| 1649 |
+
gap: 8px;
|
| 1650 |
+
background-color: transparent;
|
| 1651 |
+
}
|
| 1652 |
+
|
| 1653 |
+
.stTabs [data-baseweb="tab"] {
|
| 1654 |
+
height: 40px;
|
| 1655 |
+
white-space: pre-wrap;
|
| 1656 |
+
background-color: #0f172a; /* Offset Color: Slate 900 */
|
| 1657 |
+
border-radius: 4px 4px 0 0;
|
| 1658 |
+
gap: 1px;
|
| 1659 |
+
padding-top: 10px;
|
| 1660 |
+
padding-bottom: 10px;
|
| 1661 |
+
color: #94a3b8; /* Slate 400 */
|
| 1662 |
+
border: 1px solid #1e293b;
|
| 1663 |
+
border-bottom: none;
|
| 1664 |
+
transition: all 0.2s ease;
|
| 1665 |
+
}
|
| 1666 |
+
|
| 1667 |
+
.stTabs [data-baseweb="tab"]:hover {
|
| 1668 |
+
color: #f1f5f9;
|
| 1669 |
+
background-color: #1e293b;
|
| 1670 |
+
}
|
| 1671 |
+
|
| 1672 |
+
.stTabs [data-baseweb="tab"][aria-selected="true"] {
|
| 1673 |
+
background-color: #005981 !important; /* Select Color: Sky 500 */
|
| 1674 |
+
color: #ffffff !important;
|
| 1675 |
+
border: 1px solid #005981;
|
| 1676 |
+
}
|
| 1677 |
+
|
| 1678 |
+
/* Hide default highlight bar */
|
| 1679 |
+
[data-baseweb="tab-highlight"] {
|
| 1680 |
+
background-color: transparent !important;
|
| 1681 |
+
}
|
| 1682 |
+
|
| 1683 |
+
/* Slider Styling */
|
| 1684 |
+
.stSlider [data-baseweb="slider"] {
|
| 1685 |
+
background-color: transparent;
|
| 1686 |
+
}
|
| 1687 |
+
.stSlider [data-baseweb="slider"] > div > div {
|
| 1688 |
+
background: #005981 !important;
|
| 1689 |
+
}
|
| 1690 |
+
.stSlider [data-baseweb="slider"] [role="slider"] {
|
| 1691 |
+
background-color: #005981;
|
| 1692 |
border: 2px solid #020617;
|
| 1693 |
}
|
| 1694 |
|