prakharg24 committed on
Commit
ac15dd5
·
verified ·
1 Parent(s): b29f0e3

Update my_pages/ica.py

Browse files
Files changed (1) hide show
  1. my_pages/ica.py +98 -83
my_pages/ica.py CHANGED
@@ -1,9 +1,9 @@
1
  import streamlit as st
 
2
  import numpy as np
3
- import plotly.graph_objects as go
4
- from streamlit_plotly_events import plotly_events
5
  from utils import add_navigation, add_instruction_text, add_red_text
6
 
 
7
 
8
  def render():
9
  add_navigation("txt_ica", "txt_multiverse")
@@ -11,7 +11,7 @@ def render():
11
  add_instruction_text(
12
  """
13
  Explore the intention-convention-arbitrariness (ICA) framework.<br>
14
- Click inside the ICA triangle to move the torch and uncover examples.
15
  """
16
  )
17
 
@@ -22,93 +22,108 @@ def render():
22
  "Arbitrary": 0.34
23
  }
24
 
25
- # --- Locations (examples) ---
26
- locations = [
27
- (0.1, 0.1, 0.8, "Random Seeds",
28
- "Random Seeds are highly arbitrary, without any convention or intentionality."),
29
- (0.6, 0.3, 0.1, "Neural networks for Tabular Data",
30
- "Using neural networks of some arbitrary size (hidden layers) for a setting "
31
- "where they are not needed is highly conventional, a bit arbitrary, and has very low intentionality."),
32
- (0.2, 0.6, 0.2, "Pre-trained LLM for a Complex Task",
33
- "Using a high performing LLM for a complex task is intentional, however, it also has "
34
- "conventionality to it. No arbitrariness."),
35
- (0.7, 0.25, 0.05, "Best Bias Mitigation for a Particular Setup",
36
- "Choosing the most appropriate bias mitigation technique, specialized for the particular context, is highly intentional."),
37
- (0.45, 0.05, 0.5, "Randomly chosen Regularization Technique",
38
- "Adding regularization to improve robustness, but choosing the regularization technique randomly, "
39
- "creates a decision that is intentional and arbitrary, while avoiding conventionality."),
40
- (0.05, 0.9, 0.05, "ReLU Activation as Default",
41
- "Choosing ReLU activation without testing what other activations might also work, is a highly conventional decision."),
42
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
- # Torch radius
45
- torch_radius = 0.2
46
-
47
- # --- Create Plotly Figure ---
48
- fig = go.Figure()
49
-
50
- # Triangle border (ternary axes are already a triangle)
51
- fig.update_layout(
52
- ternary=dict(
53
- sum=1,
54
- aaxis=dict(title="Intentional", min=0.0, linewidth=2, ticks='outside'),
55
- baxis=dict(title="Conventional", min=0.0, linewidth=2, ticks='outside'),
56
- caxis=dict(title="Arbitrary", min=0.0, linewidth=2, ticks='outside'),
57
- bgcolor="black"
58
- ),
59
- paper_bgcolor="rgba(0,0,0,0)",
60
- plot_bgcolor="rgba(0,0,0,0)",
61
- showlegend=False,
62
- margin=dict(l=50, r=50, b=50, t=50)
63
  )
64
 
65
- # Add example points (faded by default)
66
- for (a, b, c, label, _) in locations:
67
- fig.add_trace(go.Scatterternary(
68
- a=[a], b=[b], c=[c],
69
- mode='markers+text',
70
- marker=dict(size=8, color='red', opacity=0.3),
71
- text=[label],
72
- textposition="top center",
73
- hoverinfo="text"
74
- ))
75
-
76
- # Torch (initial position)
77
- w = st.session_state.weights
78
- fig.add_trace(go.Scatterternary(
79
- a=[w["Intentional"]], b=[w["Conventional"]], c=[w["Arbitrary"]],
80
- mode='markers',
81
- marker=dict(size=20, color="orange", opacity=0.6, line=dict(width=2, color="white")),
82
- name="Torch"
83
- ))
84
-
85
- # --- Capture Click Events ---
86
- selected = plotly_events(fig, click_event=True, hover_event=False, select_event=False)
87
-
88
- if selected:
89
- # Directly use a,b,c from event dict
90
- point = selected[0]
91
- st.session_state.weights = {
92
- "Intentional": round(point["a"], 4),
93
- "Conventional": round(point["b"], 4),
94
- "Arbitrary": round(point["c"], 4)
95
- }
96
- st.experimental_rerun()
97
 
98
- # --- Torch highlighting logic ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
99
  explanations = []
100
- for (a, b, c, label, text) in locations:
101
- dist = np.sqrt(
102
- (a - w["Intentional"])**2 +
103
- (b - w["Conventional"])**2 +
104
- (c - w["Arbitrary"])**2
105
- )
106
  if dist <= torch_radius:
107
- explanations.append((label, text))
 
 
 
 
 
 
 
 
108
 
109
- # Show explanations if any
110
  if len(explanations) > 0:
111
  text_to_show = ""
112
  for label, labeltext in explanations:
113
- text_to_show += f"<b>{label}:</b> {labeltext}<br>"
114
- add_red_text(text_to_show)
 
1
  import streamlit as st
2
+ import matplotlib.pyplot as plt
3
  import numpy as np
 
 
4
  from utils import add_navigation, add_instruction_text, add_red_text
5
 
6
+ plt.style.use('dark_background')
7
 
8
  def render():
9
  add_navigation("txt_ica", "txt_multiverse")
 
11
  add_instruction_text(
12
  """
13
  Explore the intention-convention-arbitrariness (ICA) framework.<br>
14
+ Use different sliders to uncover examples in the ICA triangle.
15
  """
16
  )
17
 
 
22
  "Arbitrary": 0.34
23
  }
24
 
25
+ col1, col2 = st.columns([0.6, 0.4])
26
+ with col1:
27
+ control_choice = st.radio(
28
+ "Select dimension to adjust",
29
+ ["Intentional", "Conventional", "Arbitrary"],
30
+ horizontal=True,
31
+ label_visibility="collapsed"
32
+ )
33
+
34
+ # Current values
35
+ w = st.session_state.weights
36
+ current_value = w[control_choice]
37
+
38
+ with col2:
39
+ new_value = st.slider(control_choice, 0.0, 1.0, current_value, 0.01, label_visibility="collapsed")
40
+
41
+ # Adjust others proportionally
42
+ diff = new_value - current_value
43
+ others = [k for k in w.keys() if k != control_choice]
44
+ total_other = w[others[0]] + w[others[1]]
45
+
46
+ if total_other > 0:
47
+ w[others[0]] -= diff * (w[others[0]] / total_other)
48
+ w[others[1]] -= diff * (w[others[1]] / total_other)
49
+
50
+ w[control_choice] = new_value
51
+
52
+ # Clamp small floating point errors
53
+ for k in w:
54
+ w[k] = max(0.0, min(1.0, round(w[k], 4)))
55
+
56
+ # Normalize back to sum=1
57
+ total = sum(w.values())
58
+ if total != 0:
59
+ for k in w:
60
+ w[k] = round(w[k] / total, 4)
61
+
62
+ # Triangle vertices
63
+ vertices = np.array([
64
+ [0.5, np.sqrt(3)/2], # Intentional
65
+ [0, 0], # Conventional
66
+ [1, 0] # Arbitrary
67
+ ])
68
 
69
+ # Point from barycentric coords
70
+ point = (
71
+ w["Intentional"] * vertices[0] +
72
+ w["Conventional"] * vertices[1] +
73
+ w["Arbitrary"] * vertices[2]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  )
75
 
76
+ # Plot
77
+ fig, ax = plt.subplots()
78
+ ax.plot(*np.append(vertices, [vertices[0]], axis=0).T)
79
+ # ax.scatter(vertices[:,0], vertices[:,1], c=["blue", "green", "red"], s=100)
80
+ ax.text(*vertices[0], "Intentional", ha="center", va="bottom", color="green", weight="heavy")
81
+ ax.text(*vertices[1], "Conventional", ha="right", va="top", color="green", weight="heavy")
82
+ ax.text(*vertices[2], "Arbitrary", ha="left", va="top", color="green", weight="heavy")
83
+ ax.scatter(point[0], point[1], c="white", s=10000)
84
+ ax.scatter(point[0], point[1], c="orange", s=10000, zorder=5, alpha=0.3)
85
+ ax.set_aspect("equal")
86
+ ax.axis("off")
87
+
88
+ fig.patch.set_alpha(0)
89
+ ax.patch.set_alpha(0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
 
91
+ # --- Dummy points scattered inside triangle ---
92
+ # (x, y, text)
93
+ locations = [
94
+ (0.9, 0.1, "Random Seeds", "Random Seeds are highly arbitrary, without any convention or intentionality.", "left", "bottom"),
95
+ (0.35, 0.06, "Neural networks for Tabular Data", "Using neural networks of some arbitrary size (hidden layers) for a setting where \
96
+ they are not needed is highly conventional, a bit arbitrary, and has very low intentionality.", "left", "bottom"),
97
+ (0.4, 0.5, "Pre-trained LLM for a Complex Task", "Using a high performing LLM for a complex task is intentional, however, it also has \
98
+ conventionality to it, as a specialized model could have worked, depending on context.\
99
+ No arbitrariness.", "right", "bottom"),
100
+ (0.5, 0.7, "Best Bias Mitigation for a Particular Setup", "Choosing the most appropriate bias mitigation technique,\
101
+ specialized for the particular context, is highly intentional", "center", "bottom"),
102
+ (0.7, 0.5, "Randomly chosen Regularization Technique", "Adding regularization to improve robustness, but choosing the regularization technique randomly,\
103
+ creates a decision that is intentional and arbitrary, while avoiding conventionality.", "left", "bottom"),
104
+ (0.1, 0.1, "ReLU Activation as Default", "Choosing ReLU activation without testing what other activations might also work,\
105
+ is a highly conventional decision.", "right", "bottom"),
106
+ ]
107
+
108
+ torch_radius = 0.177 # how far the "torch" illuminates
109
+
110
  explanations = []
111
+ # Illuminate nearby points
112
+ for (x, y, label, labeltext, ha, va) in locations:
113
+ dist = np.linalg.norm([x - point[0], y - point[1]])
 
 
 
114
  if dist <= torch_radius:
115
+ ax.scatter(x, y, c="red", s=50, zorder=6)
116
+ ax.text(x, y + 0.03, label, ha=ha, va=va, color="red", zorder=6, weight="heavy")
117
+ explanations.append((label, labeltext))
118
+ else:
119
+ ax.scatter(x, y, c="red", s=50, zorder=6, alpha=0.3)
120
+
121
+ col1, col2, col3 = st.columns([0.3, 1, 0.3])
122
+ with col2:
123
+ st.pyplot(fig)
124
 
 
125
  if len(explanations) > 0:
126
  text_to_show = ""
127
  for label, labeltext in explanations:
128
+ text_to_show += "<b>" + label + ":</b> " + labeltext + "<br>"
129
+ add_red_text(text_to_show)